#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (context switch and event tracer) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# the options do not appear when something else selects them. We need the two
# options GENERIC_TRACER and TRACING to avoid circular dependencies while
# still hiding the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, as they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; better to implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; that NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

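# A minimal usage sketch for FUNCTION_TRACER (illustrative shell commands
# only, not part of the configuration language; paths assume debugfs/tracefs
# is mounted at the usual /sys/kernel/debug location):
#
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace | head
#	echo nop > /sys/kernel/debug/tracing/current_tracer
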
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by saving the current return
	  address in a stack of calls on the current task structure.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	depends on TRACING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

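# A rough example of using IRQSOFF_TRACER to capture the worst-case
# irqs-off latency (illustrative; same mount-point assumption as above,
# and the workload step is up to the user):
#
#	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
#	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
#	# ... run a workload ...
#	cat /sys/kernel/debug/tracing/tracing_max_latency
#	cat /sys/kernel/debug/tracing/trace
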
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	      hwlat_detector/width  - time in usecs for how long to spin for
	      hwlat_detector/window - time in usecs between the start of each
	                              iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

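# A sketch of driving HWLAT_TRACER through the files named above; the
# numbers are arbitrary examples (spin 500 ms out of every 1 s window and
# report any gap longer than 10 us):
#
#	cd /sys/kernel/debug/tracing
#	echo hwlat > current_tracer
#	echo 1000000 > hwlat_detector/window
#	echo 500000 > hwlat_detector/width
#	echo 10 > tracing_thresh
#	cat trace_pipe
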
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	      /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it hit or missed.
	  The results will be displayed in:

	      /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled, as shown in the example below.

	  Say N if unsure.

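# The two ways of enabling STACK_TRACER described above, written out as
# concrete (illustrative) commands:
#
#	# at run time, via sysctl:
#	sysctl kernel.stack_tracer_enabled=1
#	cat /sys/kernel/debug/tracing/stack_trace
#
#	# or at boot, by appending to the kernel command line:
#	#     ... stacktrace
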
config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	      git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	      echo 1 > /sys/block/sda/sda1/trace/enable
	      echo blk > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

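# An illustrative KPROBE_EVENTS session; the probed symbol (do_sys_open)
# and the event name (myprobe) are only placeholders, see
# Documentation/trace/kprobetrace.txt for the full syntax:
#
#	echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
#	echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
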
config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:

	      available_filter_functions
	      set_ftrace_filter
	      set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

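# A sketch of narrowing the function tracer to a subset of functions with
# the DYNAMIC_FTRACE files listed above (the 'vfs_*' glob is only an
# example):
#
#	cd /sys/kernel/debug/tracing
#	grep '^vfs_' available_filter_functions | head
#	echo 'vfs_*' > set_ftrace_filter
#	echo function > current_tracer
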
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stats directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It enables each event, runs various loads with the event enabled,
	  and then disables it. This adds a bit more time to kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  See Documentation/trace/events.txt.
	  If in doubt, say N.

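# An illustrative hist trigger using the kmem:kmalloc event (any other event
# and field names could be substituted); see Documentation/trace/events.txt
# for the full syntax:
#
#	echo 'hist:keys=call_site:vals=bytes_req' > \
#		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
#	cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
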
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration will record the
	  time it took to write to the tracepoint, and on the next iteration
	  that data will be passed to the tracepoint itself. That is, the
	  tracepoint will report the time it took to do the previous
	  tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine
	  because we care most about hot paths that are probably in cache
	  already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark
	  it. It creates its own ring buffer such that it will not interfere
	  with any other users of the ring buffer (such as ftrace). It then
	  creates a producer and consumer that will run for 10 seconds and
	  sleep for 10 seconds. Each interval it will print out the number of
	  events it recorded and give a rough estimate of how long each
	  iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space
	  tools that use this string to parse the raw data, as user space does
	  not know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used,
	  then the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed,
	  as they are needed for the "eval_map" file. Enabling this option
	  will increase the memory footprint of the running kernel.

	  If unsure, say N

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for the gpio subsystem.

endif # FTRACE

endif # TRACING_SUPPORT