# SPDX-License-Identifier: GPL-2.0-only
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bool

config HAVE_DYNAMIC_FTRACE_WITH_ARGS
	bool
	help
	 If this is set, then arguments and stack can be found from
	 the pt_regs passed into the function callback regs parameter
	 by default, even without setting the REGS flag in the ftrace_ops.
	 This allows for use of regs_get_kernel_argument() and
	 kernel_stack_pointer().

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount

config HAVE_OBJTOOL_MCOUNT
	bool
	help
	  Arch supports objtool --mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config BUILDTIME_MCOUNT_SORT
	bool
	default y
	depends on BUILDTIME_TABLE_SORT && !S390
	help
	  Sort the mcount_loc section at build time.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate events or attach hooks to them.

# All tracer options should select GENERIC_TRACER. The options that are
# enabled by all tracers (context switch and event tracing) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, but keeps them hidden when something else selects them. The two
# options GENERIC_TRACER and TRACING are needed to accomplish this hiding
# of the automatic options without creating circular dependencies.

config TRACING
	bool
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

menuconfig FTRACE
	bool "Tracers"
	depends on TRACING_SUPPORT
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config BOOTTIME_TRACING
	bool "Boot-time Tracing support"
	depends on TRACING
	select BOOT_CONFIG
	help
	  Enable developers to set up the ftrace subsystem via a supplemental
	  kernel cmdline at boot time, for debugging (tracing) driver
	  initialization and the boot process.

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPTION
	select TASKS_RUDE_RCU
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
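
	  For example, to turn it on at run time (paths here assume tracefs
	  is mounted at /sys/kernel/debug/tracing, as elsewhere in this file):

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace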

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its main purpose is to measure the duration of functions and
	  draw a call graph for each thread, with some information such as
	  the return value. This is done by pushing the current return
	  address onto a stack of calls in the current task structure.

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (it patches them out of the binary image and
	  replaces them with a No-Op instruction) on boot up. At
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and functions that are not enabled will not affect
	  the performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
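
	  For example, to restrict function tracing to scheduler functions
	  (the glob below is only illustrative; any pattern that matches
	  entries in available_filter_functions works):

	      echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
	      echo function > /sys/kernel/debug/tracing/current_tracer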

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	def_bool y
	depends on DYNAMIC_FTRACE_WITH_REGS
	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config DYNAMIC_FTRACE_WITH_ARGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins; when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.
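
	  For example (a minimal sketch; the per-CPU statistics appear as
	  files such as trace_stat/function0):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function0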

	  If in doubt, say N.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
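
	  For example, at run time (the sysctl is also exposed as
	  /proc/sys/kernel/stack_tracer_enabled):

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace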

	  Say N if unsure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
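
	  For example, to run the tracer and read the worst-case latency
	  recorded so far (a minimal sketch):

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency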

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on PREEMPTION
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
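
	  For example (a minimal sketch; "wakeup_rt" restricts the
	  measurement to real-time tasks, and a plain "wakeup" variant
	  also exists):

	      echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency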

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	 This tracer, when enabled, will create one or more kernel threads,
	 depending on what the cpumask file is set to, with each thread
	 spinning in a loop looking for interruptions caused by
	 something other than the kernel. For example, if a
	 System Management Interrupt (SMI) takes a noticeable amount of
	 time, this tracer will detect it. This is useful for testing
	 if a system is reliable for Real Time tasks.

	 Some files are created in the tracing directory when this
	 is enabled:

	   hwlat_detector/width   - time in usecs for how long to spin for
	   hwlat_detector/window  - time in usecs between the start of each
				     iteration

	 A kernel thread is created that will spin with interrupts disabled
	 for "width" microseconds in every "window" cycle. It does not spin
	 for the remaining "window - width" microseconds, during which the
	 system can continue to operate.

	 The output will appear in the trace and trace_pipe files.

	 When the tracer is not running, it has no effect on the system,
	 but when it is running, it can cause the system to be
	 periodically non-responsive. Do not run this tracer on a
	 production system.

	 To enable this tracer, echo "hwlat" into the current_tracer
	 file. Every time a latency is greater than tracing_thresh, it will
	 be recorded into the ring buffer.
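
	 For example (values are illustrative; width and window are in
	 microseconds, as described above):

	     echo 250000 > /sys/kernel/debug/tracing/hwlat_detector/width
	     echo 500000 > /sys/kernel/debug/tracing/hwlat_detector/window
	     echo hwlat > /sys/kernel/debug/tracing/current_tracer
	     cat /sys/kernel/debug/tracing/trace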

config OSNOISE_TRACER
	bool "OS Noise tracer"
	select GENERIC_TRACER
	help
	  In the context of high-performance computing (HPC), Operating
	  System Noise (osnoise) refers to the interference experienced by an
	  application due to activities inside the operating system. In the
	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
	  can cause noise to the system. Moreover, hardware-related jobs can
	  also cause noise, for example, via SMIs.

	  The osnoise tracer leverages the hwlat_detector by running a similar
	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
	  the sources of osnoise to occur during its execution. The osnoise
	  tracer takes note of the entry and exit point of any source of
	  interference, incrementing a per-cpu interference counter. It saves
	  an interference counter for each source of interference. The
	  interference counter for NMI, IRQs, SoftIRQs, and threads is
	  incremented any time the tool observes one of these interferences'
	  entry events. When noise happens without any interference from the
	  operating system level, the hardware noise counter increases,
	  pointing to a hardware-related noise. In this way, osnoise can
	  account for any source of interference. At the end of the period,
	  the osnoise tracer prints the sum of all noise, the max single
	  noise, the percentage of CPU available for the thread, and the
	  counters for the noise sources.

	  In addition to the tracer, a set of tracepoints was added to
	  facilitate the identification of the osnoise source.

	  The output will appear in the trace and trace_pipe files.

	  To enable this tracer, echo "osnoise" into the current_tracer
	  file.
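
	  For example (a minimal sketch; the osnoise/ directory in the
	  tracing directory exposes further tunables):

	      echo osnoise > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace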

config TIMERLAT_TRACER
	bool "Timerlat tracer"
	select OSNOISE_TRACER
	select GENERIC_TRACER
	help
	  The timerlat tracer aims to help preemptive kernel developers
	  find sources of wakeup latencies of real-time threads.

	  The tracer creates a per-cpu kernel thread with real-time priority.
	  The tracer thread sets a periodic timer to wake itself up, and goes
	  to sleep waiting for the timer to fire. At the wakeup, the thread
	  then computes a wakeup latency value as the difference between
	  the current time and the absolute time that the timer was set
	  to expire.

	  The tracer prints two lines at every activation. The first is the
	  timer latency observed at the hardirq context before the
	  activation of the thread. The second is the timer latency observed
	  by the thread, which is the same level that cyclictest reports. The
	  ACTIVATION ID field serves to relate the irq execution to its
	  respective thread execution.

	  The tracer is built on top of the osnoise tracer, and the osnoise:
	  events can be used to trace the source of interference from NMI,
	  IRQs and other threads. It also enables the capture of the
	  stacktrace at the IRQ context, which helps to identify the code
	  path that can cause thread delay.
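
	  For example (a minimal sketch):

	      echo timerlat > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace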

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
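
	  For example (a minimal sketch):

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe > mmio_dump.txt &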

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
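
	  For example, to enable all syscall events and watch them live
	  (a minimal sketch):

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	      cat /sys/kernel/debug/tracing/trace_pipe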

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it was a hit or a miss.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.

endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.
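
	  For example, the following defines a probe on do_sys_openat2()
	  (an illustrative symbol; any function kprobes can probe will do)
	  and enables the resulting event:

	      echo 'p:my_open do_sys_openat2' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/my_open/enable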

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace functions from kprobe events"
	depends on KPROBE_EVENTS
	depends on DYNAMIC_FTRACE
	default n
	help
	  This is only for developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user-space applications.
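
	  For example (the binary path and offset below are placeholders;
	  the offset is the file offset of the function to probe, e.g. as
	  reported by nm or objdump):

	      echo 'p:my_uprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/my_uprobe/enable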

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe, uprobe, and
	  tracepoint events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	 Allows BPF to override the execution of a probed function and
	 set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	bool
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_CC
	def_bool y
	depends on $(cc-option,-mrecord-mcount)
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_OBJTOOL
	def_bool y
	depends on HAVE_OBJTOOL_MCOUNT
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on FTRACE_MCOUNT_RECORD

config FTRACE_MCOUNT_USE_RECORDMCOUNT
	def_bool y
	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
	depends on !FTRACE_MCOUNT_USE_CC
	depends on !FTRACE_MCOUNT_USE_OBJTOOL
	depends on FTRACE_MCOUNT_RECORD

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config SYNTH_EVENTS
	bool "Synthetic trace events"
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Synthetic events are user-defined trace events that can be
	  used to combine data from other trace events or in fact any
	  data source.  Synthetic events can be generated indirectly
	  via the trace() action of histogram triggers or directly
	  by way of an in-kernel API.
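
	  For example, a synthetic event with two fields can be defined
	  from user space like this (the event and field names are
	  illustrative):

	      echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/debug/tracing/synthetic_events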

	  See Documentation/trace/events.rst or
	  Documentation/trace/histogram.rst for details and examples.

	  If in doubt, say N.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	select SYNTH_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.
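
	  For example, to aggregate kmalloc sizes by call site (assuming
	  the kmem:kmalloc event is available on your kernel):

	      echo 'hist:key=call_site:val=bytes_req' > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist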

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

config TRACE_EVENT_INJECT
	bool "Trace event injection"
	depends on TRACING
	help
	  Allow user-space to inject a specific trace event into the ring
	  buffer. This is mainly used for testing purposes.

	  If unsure, say N.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint, and on the next iteration that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum/sizeof names
	  instead of their values. This can cause problems for user space tools
	  that use this string to parse the raw data, as user space does not
	  know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert an enum/sizeof into its value. If this macro is used, then
	  the print fmt strings will be converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums/sizeofs the kernel tried to convert.

	  This option is for debugging the conversions. A file is created
	  in the tracing directory called "eval_map" that will show the
	  names matched with their values and what trace event system they
	  belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed, as
	  they are needed for the "eval_map" file. Enabling this option will
	  increase the memory footprint of the running kernel.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION
	bool "Record functions that recurse in function tracing"
	depends on FUNCTION_TRACER
	help
	  All callbacks that attach to function tracing have some sort
	  of protection against recursion. Even though the protection exists,
	  it adds overhead. This option will create a file in the tracefs
	  file system called "recursed_functions" that will list the functions
	  that triggered a recursion.

	  This will add more overhead to cases that have recursion.

	  If unsure, say N.

config FTRACE_RECORD_RECURSION_SIZE
	int "Max number of recursed functions to record"
	default 128
	depends on FTRACE_RECORD_RECURSION
	help
	  This defines the limit on the number of functions that can be
	  listed in the "recursed_functions" file, which lists all
	  the functions that caused a recursion to happen.
	  This file can be reset, but the limit cannot be changed at
	  runtime.

config RING_BUFFER_RECORD_RECURSION
	bool "Record functions that recurse in the ring buffer"
	depends on FTRACE_RECORD_RECURSION
	# default y, because it is coupled with FTRACE_RECORD_RECURSION
	default y
	help
	  The ring buffer has its own internal recursion. Although when
	  recursion happens it won't cause harm because of the protection,
	  it does cause unwanted overhead. Enabling this option will
	  record the places where recursion was detected in the ftrace
	  "recursed_functions" file.

	  This will add more overhead to cases that have recursion.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on the ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup,
	  a series of tests is run to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run, as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It simply enables each event, runs various loads with the event
	 enabled, and then disables it. This adds a bit more time to kernel
	 boot-up, since it does this for every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config FTRACE_SORT_STARTUP_TEST
	bool "Verify compile time sorting of ftrace functions"
	depends on DYNAMIC_FTRACE
	depends on BUILDTIME_MCOUNT_SORT
	help
	  Sorting of the mcount_loc section, which ftrace uses to know
	  where to patch functions for tracing and other callbacks, is
	  done at compile time. But if the sort is not done correctly,
	  it will cause non-deterministic failures. When this is set,
	  the sorted sections will be verified to be indeed sorted, and
	  a warning will be issued if they are not.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test starts and kicks off
	  a thread per CPU. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config RING_BUFFER_VALIDATE_TIME_DELTAS
	bool "Verify ring buffer time stamp deltas"
	depends on RING_BUFFER
	help
	  This will audit the time stamps on the ring buffer sub
	  buffers to make sure that all the time deltas for the
	  events on a sub buffer match the current time stamp.
	  This audit is performed for every event that is neither
	  interrupted nor interrupting another event. A check
	  is also made when traversing sub buffers to make sure
	  that all the deltas on the previous sub buffer do not
	  add up to be greater than the current time stamp.

	  NOTE: This adds significant overhead to recording of events,
	  and should only be used to test the logic of the ring buffer.
	  Do not use it on production systems.

	  Only say Y if you understand what this does, and you
	  still want it enabled. Otherwise say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config PREEMPTIRQ_DELAY_TEST
	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation generates a burst of three
	  irq-disabled critical sections for 500us:

	      modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3

	  In addition, if you want to pin the test to the CPU that the latency
	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
	  command.

	  If unsure, say N.

config SYNTH_EVENT_GEN_TEST
	tristate "Test module for in-kernel synthetic event generation"
	depends on SYNTH_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel synthetic event definition and
	  generation.

	  To test, insert the module, and then check the trace buffer
	  for the generated sample events.

	  If unsure, say N.

config KPROBE_EVENT_GEN_TEST
	tristate "Test module for in-kernel kprobe event generation"
	depends on KPROBE_EVENTS
	help
	  This option creates a test module to check the base
	  functionality of in-kernel kprobe event definition.

	  To test, insert the module, and then check the trace buffer
	  for the generated kprobe events.

	  If unsure, say N.

config HIST_TRIGGERS_DEBUG
	bool "Hist trigger debug support"
	depends on HIST_TRIGGERS
	help
	  Add a "hist_debug" file for each event, which when read will
	  dump out a bunch of internal details about the hist triggers
	  defined on that event.

	  The hist_debug file serves a couple of purposes:

	    - Helps developers verify that nothing is broken.

	    - Provides educational information to support the details
	      of the hist trigger internals as described by
	      Documentation/trace/histogram-design.rst.

	  The hist_debug output only covers the data structures
	  related to the histogram definitions themselves and doesn't
	  display the internals of map buckets or variable values of
	  running histograms.

	  If unsure, say N.

endif # FTRACE