# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two
# options GENERIC_TRACER and TRACING to avoid circular dependencies while still
# hiding the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	default y
	help
	  Enable setting up the ftrace subsystem via a supplemental kernel
	  command line ("boot config") at boot time, for debugging (tracing)
	  driver initialization and the boot process.
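
	  As a rough sketch of one possible workflow (the file name here is
	  made up; the supported keys are documented in
	  Documentation/trace/boottime-trace.rst), a supplemental boot config
	  can be appended to the initrd with the bootconfig tool and enabled
	  by adding "bootconfig" to the kernel command line:

	    bootconfig -a tracing.bconf /boot/initrd.img

	  where tracing.bconf contains ftrace.* keys such as
	  "ftrace.event.task.task_newtask.enable".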

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry and its
	  return. Its first purpose is to measure the duration of functions
	  and draw a call graph for each thread, with some information such
	  as the return value. This is done by pushing the current return
	  address onto a stack of calls kept in the current task structure.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.
	  If this is configured with DYNAMIC_FTRACE then it will not have
	  any overhead while the stack tracer is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	select TRACE_PREEMPT_TOGGLE if PREEMPTION
	select GENERIC_TRACER
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  When enabled, this tracer creates one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  if a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin
	    hwlat_detector/window - time in usecs between the start of each
	                            iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	    echo 1 > /sys/kernel/debug/tracing/snapshot
	    cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	    echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to trace
	  recording, as some checks are needed to synchronize recording
	  with swaps. But this does not affect the performance of the
	  overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
	help
	  This tracer profiles all branch conditions. Every if ()
	  condition in the kernel is recorded, whether it was taken or not.
	  The results will be displayed in:

	    /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a large overhead
	  on the system. It should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely/unlikely conditions are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	    git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.
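
# The dynamic event options below (kprobe, uprobe and synthetic events) are
# driven from tracefs at run time. As an illustrative sketch only (the probed
# symbol and event name are examples; the exact grammar is documented in
# Documentation/trace/kprobetrace.rst), adding and enabling a kprobe event
# typically looks like:
#
#   echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
#   echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
#   cat /sys/kernel/debug/tracing/trace_pipe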

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent infinite
	  recursion or any unexpected execution path which could lead to a
	  kernel crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.
	  They're useful for gathering quick and dirty (though precise)
	  summaries of event activity as an initial guide for further
	  investigation using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration will record
	  the time it took to write to the tracepoint, and on the next
	  iteration that data will be passed to the tracepoint itself. That
	  is, the tracepoint will report the time it took to do the previous
	  tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine
	  because we care most about hot paths that are probably in cache
	  already.

	  An example of the output:

	     START
	     first=3672 [COLD CACHED]
	     last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	     last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	     last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	     last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	     last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	     last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark
	  it. It creates its own ring buffer such that it will not interfere
	  with any other users of the ring buffer (such as ftrace). It then
	  creates a producer and consumer that will run for 10 seconds and
	  sleep for 10 seconds. Each interval it will print out the number of
	  events it recorded and give a rough estimate of how long each
	  iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space
	  tools that use this string to parse the raw data, as user space does
	  not know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used,
	  then the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed,
	  as they are needed for the "eval_map" file. Enabling this option
	  will increase the memory footprint of the running kernel.

	  If unsure, say N.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It enables each event, runs various loads with the event enabled,
	  and then disables it. This adds a bit more time for kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per CPU. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.
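
# The remaining options build small self-test modules. As a rough example of
# exercising one of them (module and file names assume a default modular build
# and a tracefs mounted under /sys/kernel/debug/tracing):
#
#   modprobe synth_event_gen_test
#   cat /sys/kernel/debug/tracing/trace
#   rmmod synth_event_gen_test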

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on HIST_TRIGGERS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT