#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	depends on DEBUG_PREEMPT || !PROVE_LOCKING
	depends on TRACING
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.
	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
	  be disabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled will create one or more kernel threads,
	  depending on what the cpumask file is set to, which each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width   - time in usecs for how long to spin for
	    hwlat_detector/window  - time in usecs between the start of each
				     iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo in "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, this adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already adds the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  The branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stats directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.txt.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint and the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	       START
	       first=3672 [COLD CACHED]
	       last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	       last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	       last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	       last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	       last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	       last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start that kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer. What
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config PREEMPTIRQ_DELAY_TEST
	tristate "Preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation forces a one-time irq-disabled
	  critical section for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500000

	  If unsure, say N

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for gpio subsystem

endif # FTRACE

endif # TRACING_SUPPORT