#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (context switch and event tracer) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# they do not appear when something else selects them. We need the two
# options GENERIC_TRACER and TRACING to avoid circular dependencies and to
# accomplish the hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcomed; better to implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator.
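
	  As a rough illustration (paths assume debugfs is mounted at
	  /sys/kernel/debug, as in the other examples in this file), the
	  function tracer can be turned on and read back at run time via:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
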
	  If tracing is runtime disabled (the bootup default), the overhead
	  of these instructions is very small and not measurable even in
	  micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by pushing the current return
	  address of the traced function onto a stack of calls kept on
	  the current task structure.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	depends on TRACING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  When enabled, this tracer creates one or more kernel threads,
	  depending on what the cpumask file is set to; each thread
	  spins in a loop looking for interruptions caused by
	  something other than the kernel.
	  For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	      hwlat_detector/width  - time in usecs for how long to spin
	      hwlat_detector/window - time in usecs between the start of
				      each iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace points they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to trace
	  recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	      /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if () in the
	  kernel is recorded, whether it was taken or not. The results
	  will be displayed in:

	      /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, imposes a significant overhead
	  on the system. It should only be enabled when the system is to
	  be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely/unlikely conditions are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping the maximum stack depth and its
	  stack trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue.
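
	  For example, once the userspace blktrace tools mentioned below are
	  installed, a device can be traced live with something like the
	  following (/dev/sda is just a placeholder device name):

	      blktrace -d /dev/sda -o - | blkparse -i -
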
	  For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	      git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	      echo 1 > /sys/block/sda/sda1/trace/enable
	      echo blk > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on top of userspace dynamic probe points, on the fly, via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically on boot up (it will patch them out of the binary
	  image and replace them with a No-Op instruction). At compile
	  time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  the performance of the system.

	  See the files in /sys/kernel/debug/tracing:

	      available_filter_functions
	      set_ftrace_filter
	      set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. Per-CPU files (function0,
	  function1, ...) are created in the trace_stat directory; these
	  files show the list of functions that have been hit and their
	  counters.

	  If in doubt, say N.
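
	  As a rough usage sketch (paths assume debugfs is mounted at
	  /sys/kernel/debug, as in the other examples in this file):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      # ... run the workload of interest ...
	      cat /sys/kernel/debug/tracing/trace_stat/function0
	      echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
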
config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It simply enables each event, runs various loads with the event
	  enabled, and then disables it. This adds a bit more time to
	  kernel boot up, since it is run for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.txt.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
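
	  Like any other trace event, it can be enabled at run time, for
	  example (with debugfs mounted at /sys/kernel/debug):

	      echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable
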
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration will record
	  the time it took to write to the tracepoint, and on the next
	  iteration that data will be passed to the tracepoint itself. That
	  is, the tracepoint will report the time it took to do the previous
	  tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine
	  because we care most about hot paths that are probably in cache
	  already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark
	  it. It creates its own ring buffer such that it will not interfere
	  with any other users of the ring buffer (such as ftrace). It then
	  creates a producer and consumer that will run for 10 seconds and
	  sleep for 10 seconds. Each interval it will print out the number of
	  events it recorded and give a rough estimate of how long each
	  iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  one thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space
	  tools that use this string to parse the raw data, as user space
	  does not know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value.
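
	  (As an illustration: trace event headers register such conversions
	  with the TRACE_DEFINE_ENUM() and TRACE_DEFINE_SIZEOF() macros; for
	  instance, a header might contain TRACE_DEFINE_ENUM(MY_ENUM_CONST),
	  where MY_ENUM_CONST is a placeholder name used here only as an
	  example.)
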
	  If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed,
	  as they are needed for the "eval_map" file. Enabling this option
	  will increase the memory footprint of the running kernel.

	  If unsure, say N.

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for the gpio subsystem.

endif # FTRACE

endif # TRACING_SUPPORT