# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	 If this is set, then arguments and stack can be found from
	 the pt_regs passed into the function callback regs parameter
	 by default, even without setting the REGS flag in the ftrace_ops.
	 This allows for use of regs_get_kernel_argument() and
	 kernel_stack_pointer().

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer), they select TRACING.
# This allows those options to appear when no other tracer is selected, but the
# options do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies and still accomplish
# the hiding of the automatic options.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time, for debugging (tracing) driver
	  initialization and the boot process.
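
	  As an illustrative sketch only (the exact key names are described
	  in Documentation/trace/boottime-trace.rst), a bootconfig fragment
	  could look like:

	      ftrace.tracer = function_graph
	      ftrace.event.sched.sched_switch.enable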

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
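
	  For example, assuming the tracing directory is mounted at the
	  usual location, the tracer can be tried with:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
	      echo nop > /sys/kernel/debug/tracing/current_tracer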

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.
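
	  For example (assuming the usual debugfs mount point):

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace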

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
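
	  For example, tracing can be restricted to a single function with
	  something like (paths assume the usual debugfs mount point):

	      echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
	      echo function > /sys/kernel/debug/tracing/current_tracer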

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.
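
	  For example (paths assume the usual debugfs mount point):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function*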

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled
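
	  For example, at runtime:

	      sysctl kernel.stack_tracer_enabled=1
	      cat /sys/kernel/debug/tracing/stack_trace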

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
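
	  The tracer itself is selected, for example, with:

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency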

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
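
	  The tracer itself is selected, for example, with:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer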

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
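
	  For example, the wakeup latency tracers are selected with:

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer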

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	 This tracer, when enabled, will create one or more kernel threads,
	 depending on what the cpumask file is set to, with each thread
	 spinning in a loop looking for interruptions caused by
	 something other than the kernel. For example, if a
	 System Management Interrupt (SMI) takes a noticeable amount of
	 time, this tracer will detect it. This is useful for testing
	 if a system is reliable for Real Time tasks.

	 Some files are created in the tracing directory when this
	 is enabled:

	   hwlat_detector/width   - time in usecs for how long to spin for
	   hwlat_detector/window  - time in usecs between the start of each
				     iteration

	 A kernel thread is created that will spin with interrupts disabled
	 for "width" microseconds in every "window" cycle. It will not spin
	 for "window - width" microseconds, where the system can
	 continue to operate.

	 The output will appear in the trace and trace_pipe files.

	 When the tracer is not running, it has no effect on the system,
	 but when it is running, it can cause the system to be
	 periodically non responsive. Do not run this tracer on a
	 production system.

	 To enable this tracer, echo "hwlat" into the current_tracer
	 file. Every time a latency is greater than tracing_thresh, it will
	 be recorded into the ring buffer.
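
	 For example (assuming the usual debugfs mount point; the threshold
	 value below is just an illustration):

	   echo hwlat > /sys/kernel/debug/tracing/current_tracer
	   echo 10 > /sys/kernel/debug/tracing/tracing_thresh
	   cat /sys/kernel/debug/tracing/trace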

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	help
	  In the context of high-performance computing (HPC), the Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise during its execution. The osnoise tracer takes
	  note of the entry and exit point of any source of interferences,
	  increasing a per-cpu interference counter. It saves an interference
	  counter for each source of interference. The interference counter for
	  NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
	  observes these interferences' entry events. When a noise happens
	  without any interference from the operating system level, the
	  hardware noise counter increases, pointing to a hardware-related
	  noise. In this way, osnoise can account for any source of
	  interference. At the end of the period, the osnoise tracer prints
	  the sum of all noise, the max single noise, the percentage of CPU
	  available for the thread, and the counters for the noise sources.

	  In addition to the tracer, a set of tracepoints were added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.
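
	  For example (assuming the usual debugfs mount point):

	      echo osnoise > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace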

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help the preemptive kernel developers
	  to find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.
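
	  For example (assuming the usual debugfs mount point):

	      echo timerlat > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace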

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
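
	  For example, run-time enabling usually looks like (see the
	  document below for the full procedure):

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &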

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.
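
	  For example, individual trace points can be enabled with
	  something like:

	      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	      cat /sys/kernel/debug/tracing/trace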

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
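
	  For example, all syscall events can be enabled at once with:

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	      cat /sys/kernel/debug/tracing/trace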

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 was swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 The branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it was hit or missed.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by perf-probe subcommand of perf tools.
	  If you want to use perf tools, this option is strongly recommended.
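
	  For example, a probe on a kernel function can be added with
	  something like (see the document above for the full syntax):

	      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable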

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use perf-probe subcommand
	  of perf tools on user space applications.
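
	  For example (the binary path and offset below are purely
	  illustrative; see Documentation/trace/uprobetracer.rst):

	      echo 'p:bash_probe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/bash_probe/enable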

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	 Allows BPF to override the execution of a probed function and
	 set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source.  Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.
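
	  For example, a synthetic event can be defined from user space
	  with something like (see the documents below for details):

	      echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/debug/tracing/synthetic_events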

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.
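
	  For example, a simple histogram of kmalloc sizes keyed on the
	  call site can be set up with something like:

	      echo 'hist:key=call_site:val=bytes_req' > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist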

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint and, on the next iteration, that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666


config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
       bool "Show eval mappings for trace events"
       depends on TRACING
       help
	The "print fmt" of the trace events will show the enum/sizeof names
	instead of their values. This can cause problems for user space tools
	that use this string to parse the raw data as user space does not know
	how to convert the string to its value.

	To fix this, there's a special macro in the kernel that can be used
	to convert an enum/sizeof into its value. If this macro is used, then
	the print fmt strings will be converted to their values.

	If something does not get converted properly, this option can be
	used to show what enums/sizeof the kernel tried to convert.

	This option is for debugging the conversions. A file is created
	in the tracing directory called "eval_map" that will show the
	names matched with their values and what trace event system they
	belong to.
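
	For example:

	    cat /sys/kernel/debug/tracing/eval_map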

	Normally, the mapping of the strings to values will be freed after
	boot up or module load. With this option, they will not be freed, as
	they are needed for the "eval_map" file. Enabling this option will
	increase the memory footprint of the running kernel.

	If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to the function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.
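
	  For example:

	      cat /sys/kernel/debug/tracing/recursed_functions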

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default	128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit of the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit can not be changed in
	  size at runtime.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  it does cause unwanted overhead. Enabling this option will
	  record the place where recursion was detected into the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It only enables each event, runs various loads with the event
	 enabled, and then disables it. This adds a bit more time for
	 kernel boot up since it runs this on every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config RING_BUFFER_STARTUP_TEST
       bool "Ring buffer startup self test"
       depends on RING_BUFFER
       help
	 Run a simple self test on the ring buffer on boot up. Late in the
	 kernel boot sequence, the test will start, kicking off
	 a thread per cpu. Each thread will write various size events
	 into the ring buffer. Another thread is created to send IPIs
	 to each of the threads, where the IPI handler will also write
	 to the ring buffer, to test/stress the nesting ability.
	 If any anomalies are discovered, a warning will be displayed
	 and all ring buffers will be disabled.

	 The test runs for 10 seconds. This will slow your boot time
	 by at least 10 more seconds.

	 At the end of the test, statistics and more checks are done.
	 It will output the stats of each per cpu buffer: what
	 was written, the sizes, what was read, what was lost, and
	 other similar details.

	 If unsure, say N

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffer to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is not
	  interrupted, or interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:
	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  In addition, if you want to run the test on the cpu that the latency
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
          This option creates a test module to check the base
          functionality of in-kernel synthetic event definition and
          generation.

          To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
          This option creates a test module to check the base
          functionality of in-kernel kprobe event definition.

          To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
          Add "hist_debug" file for each event, which when read will
          dump out a bunch of internal details about the hist triggers
          defined on that event.

          The hist_debug file serves a couple of purposes:

            - Helps developers verify that nothing is broken.

            - Provides educational information to support the details
              of the hist trigger internals as described by
              Documentation/trace/histogram-design.rst.

          The hist_debug output only covers the data structures
          related to the histogram definitions themselves and doesn't
          display the internals of map buckets or variable values of
          running histograms.

          If unsure, say N.
1033
1034endif # FTRACE
1035
1036endif # TRACING_SUPPORT
1037
1038