# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_RETHOOK
	bool

config RETHOOK
	bool
	depends on HAVE_RETHOOK
	help
	  Enable the generic return hooking feature. This is an internal
	  API, which will be used by other function-entry hooking
	  features like fprobe and kprobes.

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and stack can be found from
	  the ftrace_regs passed into the function callback regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of ftrace_regs_get_argument() and
	  ftrace_regs_get_stack_pointer().

config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
	bool
	help
	  If the architecture generates __patchable_function_entries sections
	  but does not want them included in the ftrace locations.

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_OBJTOOL_NOP_MCOUNT
	bool
	help
	  Arch supports the objtool options --mcount with --mnop.
	  An architecture can select this if it wants to enable nop'ing
	  of ftrace locations.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config HAVE_BUILDTIME_MCOUNT_SORT
	bool
	help
	  An architecture selects this if it sorts the mcount_loc section
	  at build time.

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them.
# We need the two options GENERIC_TRACER and TRACING to avoid circular
# dependencies while still hiding the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
	select TASKS_RCU if PREEMPTION

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel command line at boot time, for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), then the overhead of the instructions
	  is very small and not measurable even in micro-benchmarks (at least
	  on x86, but it may have an impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
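
	  For example (a minimal illustration, assuming tracefs is mounted at
	  /sys/kernel/debug/tracing and that schedule() appears in
	  available_filter_functions on your kernel), tracing can be limited
	  to a single function with:

	    echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
	    echo function > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace

	  Writing an empty string ("echo > set_ftrace_filter") clears the
	  filter again.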

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FPROBE
	bool "Kernel Function Probe (fprobe)"
	depends on FUNCTION_TRACER
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_RETHOOK
	select RETHOOK
	default n
	help
	  This option enables the kernel function probe (fprobe) based on
	  ftrace. The fprobe is similar to kprobes, but it probes only
	  kernel function entries and exits. A single fprobe can also
	  probe multiple functions.

	  If unsure, say N.

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started via:

	    echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
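
	  For example (one illustrative workflow; the tracer can also be
	  controlled through the other tracefs files), the tracer can be
	  selected and the current maximum latency read back with:

	    echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency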

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started via:

	    echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer, when enabled, will create one or more kernel threads
	  (depending on what the cpumask file is set to), with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width  - time in usecs for how long to spin for
	    hwlat_detector/window - time in usecs between the start of each
	                            iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for "window - width" microseconds, where the system can
	  continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interference,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference.
	  The interference counter for NMI, IRQs, SoftIRQs, and threads is
	  increased any time the tool observes the entry event of one of
	  these interferences. When a noise happens without any interference
	  from the operating system level, the hardware noise counter
	  increases, pointing to a hardware-related noise. In this way,
	  osnoise can account for any source of interference. At the end of
	  the period, the osnoise tracer prints the sum of all noise, the max
	  single noise, the percentage of CPU available for the thread, and
	  the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints was added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
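
	  For example (an illustrative sequence; the exact event name depends
	  on the syscalls available on the architecture), the openat() entry
	  event can be enabled and watched with:

	    echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
	    cat /sys/kernel/debug/tracing/trace_pipe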

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	    echo 1 > /sys/kernel/debug/tracing/snapshot
	    cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	    echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it was a hit or a miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. It should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which could lead to a
	  kernel crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.
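
	  For example (a purely illustrative probe definition; the binary
	  path and the 0x4710 offset are placeholders and must be replaced
	  with a real instruction offset in the target on your system),
	  a uprobe event can be created and enabled via tracefs with:

	    echo 'p:myprobe /bin/bash:0x4710' >> /sys/kernel/debug/tracing/uprobe_events  # placeholder offset
	    echo 1 > /sys/kernel/debug/tracing/events/uprobes/myprobe/enable

	  See Documentation/trace/uprobetracer.rst for the full syntax.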

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD
	select OBJTOOL

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers. It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source. Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	depends on BROKEN || COMPILE_TEST # API needs to be straightened out
	help
	  User trace events are user-defined trace events that
	  can be used like an existing kernel trace event. User trace
	  events are generated by writing to a tracefs file. User
	  processes can determine if their tracing events should be
	  generated by memory mapping a tracefs file and checking for
	  an associated byte being non-zero.

	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.
	  They're useful for gathering quick and dirty (though precise)
	  summaries of event activity as an initial guide for further
	  investigation using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and on the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	    START
	    first=3672 [COLD CACHED]
	    last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	    last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	    last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	    last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	    last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	    last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeof the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit can not change in
	  size at runtime.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  it does cause unwanted overhead. Enabling this option will
	  record where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It only enables the event and disables it and runs various loads
	  with the event enabled.
	  This adds a bit more time to kernel boot-up, since it runs the
	  test on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to know where
	  to patch functions for tracing and other callbacks, is done at
	  compile time. But if the sort is not done correctly, it will cause
	  non-deterministic failures. When this is set, the sorted sections
	  will be verified to be indeed sorted, and a warning will be issued
	  if they are not.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start and kick off
	  a thread per CPU. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffers to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections of 500us each:

	    modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  In addition, if you want to run the test on the CPU that the latency
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N.

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

source "kernel/trace/rv/Kconfig"

endif # FTRACE