# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	  If this is set, then arguments and the stack can be found from
	  the pt_regs passed into the function callback's regs parameter
	  by default, even without setting the REGS flag in the ftrace_ops.
	  This allows for use of regs_get_kernel_argument() and
	  kernel_stack_pointer().

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  Arch has a C version of recordmcount available

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them or attach hooks to them.

# All tracer options should select GENERIC_TRACER. Those options that are
# enabled by all tracers (context switch and event tracer) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, but the options do not appear when something else selects them.
# We need the two options GENERIC_TRACER and TRACING to avoid circular
# dependencies while hiding the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel command line (boot config) at boot time, for debugging (tracing)
	  driver initialization and the boot process.

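# Illustrative BOOTTIME_TRACING sketch (hedged: the key names are assumed to
# follow Documentation/trace/boottime-trace.rst and the file name is made up):
#
#   # trace.bconf
#   ftrace.tracer = function_graph
#   ftrace.event.sched.sched_process_exec.enable
#
#   # Attach the boot config to the initrd and boot with the "bootconfig"
#   # kernel parameter:
#   bootconfig -a trace.bconf /boot/initrd.img
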
config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), then the overhead of the instructions
	  is very small and not measurable even in micro-benchmarks.

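# Illustrative FUNCTION_TRACER usage sketch (assumes tracefs is mounted at
# /sys/kernel/debug/tracing, as in the help texts in this file):
#
#   echo function > /sys/kernel/debug/tracing/current_tracer
#   head /sys/kernel/debug/tracing/trace
#   echo nop > /sys/kernel/debug/tracing/current_tracer
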
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information such
	  as the return value. This is done by saving the function's return
	  address into a stack of calls on the current task structure.

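# Illustrative FUNCTION_GRAPH_TRACER sketch (vfs_read is just an example;
# set_graph_function limits the graph to that function and its callees):
#
#   cd /sys/kernel/debug/tracing
#   echo vfs_read > set_graph_function
#   echo function_graph > current_tracer
#   head trace
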
config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. At
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

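# Illustrative DYNAMIC_FTRACE filtering sketch (the glob is an example; the
# files are the ones listed in the help text above):
#
#   cd /sys/kernel/debug/tracing
#   echo 'vfs_*' > set_ftrace_filter
#   echo function > current_tracer
#   head trace
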
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

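# Illustrative FUNCTION_PROFILER sketch (on a typical system the trace_stat
# directory holds one per-CPU stat file, e.g. function0):
#
#   echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
#   head /sys/kernel/debug/tracing/trace_stat/function0
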
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

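# Illustrative STACK_TRACER sketch (both interfaces are the ones named in the
# help text above):
#
#   sysctl kernel.stack_tracer_enabled=1
#   cat /sys/kernel/debug/tracing/stack_trace
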
config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

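# Illustrative IRQSOFF_TRACER sketch (writing 0 to tracing_max_latency
# restarts the maximum search, as described above):
#
#   echo irqsoff > /sys/kernel/debug/tracing/current_tracer
#   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
#   cat /sys/kernel/debug/tracing/tracing_max_latency
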
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

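# Illustrative PREEMPT_TRACER sketch ("preemptoff" is this tracer; when
# IRQSOFF_TRACER is also enabled, a combined "preemptirqsoff" tracer is
# available as well):
#
#   echo preemptoff > /sys/kernel/debug/tracing/current_tracer
#   cat /sys/kernel/debug/tracing/tracing_max_latency
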
config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

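# Illustrative SCHED_TRACER sketch ("wakeup" traces the highest-priority
# task, "wakeup_rt" restricts that to RT tasks):
#
#   echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
#   cat /sys/kernel/debug/tracing/tracing_max_latency
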
config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to, with each thread
	  spinning in a loop looking for interruptions caused by
	  something other than the kernel. For example, if a
	  System Management Interrupt (SMI) takes a noticeable amount of
	  time, this tracer will detect it. This is useful for testing
	  whether a system is reliable for Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width   - time in usecs for how long to spin for
	    hwlat_detector/window  - time in usecs between the start of each
				     iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for the remaining "window - width" microseconds, during which the
	  system can continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to be
	  periodically non-responsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.

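# Illustrative HWLAT_TRACER sketch (values are examples: spin 10ms out of
# every 100ms window and report anything above 10us):
#
#   cd /sys/kernel/debug/tracing
#   echo 100000 > hwlat_detector/window
#   echo 10000 > hwlat_detector/width
#   echo 10 > tracing_thresh
#   echo hwlat > current_tracer
#   cat trace_pipe
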
config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

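# Illustrative FTRACE_SYSCALLS sketch (syscall events appear under
# events/syscalls; openat is just an example):
#
#   cd /sys/kernel/debug/tracing
#   echo 1 > events/syscalls/sys_enter_openat/enable
#   cat trace_pipe
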
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyze the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it was hit or missed.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.

endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely and unlikely conditions are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

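# Illustrative BLK_DEV_IO_TRACE userspace sketch (blktrace/blkparse come from
# the tools referenced above; /dev/sda is an example device):
#
#   blktrace -d /dev/sda -o - | blkparse -i -
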
config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

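# Illustrative KPROBE_EVENTS sketch (probe name, symbol and $argN fetchargs
# follow Documentation/trace/kprobetrace.rst; $argN needs arch support):
#
#   cd /sys/kernel/debug/tracing
#   echo 'p:myprobe do_sys_open dfd=$arg1 filename=$arg2' >> kprobe_events
#   echo 1 > events/kprobes/myprobe/enable
#   cat trace_pipe
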
config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on top of userspace dynamic probes, on the fly, via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

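# Illustrative UPROBE_EVENTS sketch (the binary path and offset are made-up
# examples; the syntax follows Documentation/trace/uprobetracer.rst):
#
#   cd /sys/kernel/debug/tracing
#   echo 'p:bash_probe /bin/bash:0x4245c0' >> uprobe_events
#   echo 1 > events/uprobes/bash_probe/enable
#   cat trace_pipe
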
config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	  Allows BPF to override the execution of a probed function and
	  set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source.  Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

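# Illustrative SYNTH_EVENTS sketch (field names/types are examples; the
# syntax follows Documentation/trace/histogram.rst):
#
#   cd /sys/kernel/debug/tracing
#   echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
#   cat events/synthetic/wakeup_latency/format
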
config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

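# Illustrative HIST_TRIGGERS sketch (the kmalloc example comes from
# Documentation/trace/histogram.rst):
#
#   cd /sys/kernel/debug/tracing
#   echo 'hist:key=call_site:val=bytes_req' > events/kmem/kmalloc/trigger
#   cat events/kmem/kmalloc/hist
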
config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other tasks
	  run), and calls the tracepoint. Each iteration will record the time
	  it took to write to the tracepoint, and in the next iteration that
	  data will be passed to the tracepoint itself. That is, the tracepoint
	  will report the time it took to do the previous tracepoint.
	  The string written to the tracepoint is a static string of 128 bytes
	  to keep the time the same. The initial string is simply a write of
	  "START". The second string records the cold cache time of the first
	  write, which is not added to the rest of the calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine because
	  we care most about hot paths that are probably in cache already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by other processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not know
	  how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit on the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed
	  at runtime.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion protection. When
	  recursion happens it won't cause harm because of that protection,
	  but it does cause unwanted overhead. Enabling this option will
	  record the places where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup,
	  a series of tests is run to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	  This option will also enable testing every syscall event.
	  It enables and disables each event and runs various loads
	  with the event enabled. This adds a bit more time to kernel boot
	  up since it runs this on every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test will start, kicking off
	  a thread per CPU. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffers to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, nor interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on, e.g., an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:

	      modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  If unsure, say N.

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT