# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate events or add hooks to them.

# All tracer options should select GENERIC_TRACER. The options that are
# enabled by all tracers (the context switch and event tracers) select
# TRACING instead. This allows those options to appear when no other tracer
# is selected, but not to appear when something else selects them. The two
# options GENERIC_TRACER and TRACING are needed to avoid circular
# dependencies while accomplishing the hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
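	  As an illustration (assuming tracefs is mounted at the usual
	  /sys/kernel/debug/tracing location), the function tracer can be
	  turned on and read back at runtime with, e.g.:

	    echo function > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace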
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by saving the current return
	  address in a stack of calls on the current task structure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	select TRACE_PREEMPT_TOGGLE if PREEMPTION
	select GENERIC_TRACER
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
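	  For instance (paths below assume the usual tracefs mount point),
	  the wakeup latency tracer provided by this option is typically
	  selected and inspected at runtime with, e.g.:

	    echo wakeup > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency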
config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
				    iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	    echo 1 > /sys/kernel/debug/tracing/snapshot
	    cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	    echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
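	  As a rough illustration (again assuming the usual tracefs mount
	  point), the collected statistics of either profiler end up under
	  trace_stat, e.g.:

	    cat /sys/kernel/debug/tracing/trace_stat/branch_annotated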
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
	help
	  This tracer profiles all branch conditions. Every if ()
	  executed in the kernel is recorded, whether it was taken or not.
	  The results will be displayed in:

	    /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when its own likely and unlikely branches are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
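	  For example (assuming the usual procfs and tracefs paths), the
	  runtime toggle and the result file can be used like this:

	    echo 1 > /proc/sys/kernel/stack_tracer_enabled
	    cat /sys/kernel/debug/tracing/stack_trace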
	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	    git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoints, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:

	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
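	  As an illustration (assuming the usual tracefs mount point, and
	  with vfs_read picked merely as an arbitrary example function), a
	  filter can be installed at runtime with, e.g.:

	    echo vfs_read > /sys/kernel/debug/tracing/set_ftrace_filter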
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables each event, runs various loads with the event
	  enabled, and then disables it again. This adds a bit more time
	  to kernel boot up since it does this for every system call
	  defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
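	  For instance (assuming the usual tracefs mount point), tracing is
	  typically started at runtime with, e.g.:

	    echo mmiotrace > /sys/kernel/debug/tracing/current_tracer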
	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file. They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration will record
	  the time it took to write to the tracepoint, and on the next
	  iteration that data will be passed to the tracepoint itself. That
	  is, the tracepoint will report the time it took to do the previous
	  tracepoint write.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.
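	  As an illustration (assuming the usual tracefs mount point), the
	  benchmark is kicked off by enabling the event like any other
	  trace event, e.g.:

	    echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable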
	  An example of the output:

	    START
	    first=3672 [COLD CACHED]
	    last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	    last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	    last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	    last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	    last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	    last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per cpu. Each thread will write events of various sizes
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config PREEMPTIRQ_DELAY_TEST
	tristate "Preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation forces a one-time irq-disabled
	  critical section for 500us:

	    modprobe preemptirq_delay_test test_mode=irq delay=500000

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

endif # FTRACE

endif # TRACING_SUPPORT