xref: /openbmc/linux/kernel/trace/Kconfig (revision f3d7c2cd)
1# SPDX-License-Identifier: GPL-2.0-only
2#
3# Architectures that offer a FUNCTION_TRACER implementation should
4#  select HAVE_FUNCTION_TRACER:
5#
6
7config USER_STACKTRACE_SUPPORT
8	bool
9
10config NOP_TRACER
11	bool
12
13config HAVE_FUNCTION_TRACER
14	bool
15	help
16	  See Documentation/trace/ftrace-design.rst
17
18config HAVE_FUNCTION_GRAPH_TRACER
19	bool
20	help
21	  See Documentation/trace/ftrace-design.rst
22
23config HAVE_DYNAMIC_FTRACE
24	bool
25	help
26	  See Documentation/trace/ftrace-design.rst
27
28config HAVE_DYNAMIC_FTRACE_WITH_REGS
29	bool
30
31config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
32	bool
33
34config HAVE_DYNAMIC_FTRACE_WITH_ARGS
35	bool
36	help
37	 If this is set, then arguments and stack can be found from
38	 the pt_regs passed into the function callback's regs parameter
39	 by default, even without setting the REGS flag in the ftrace_ops.
40	 This allows for use of regs_get_kernel_argument() and
41	 kernel_stack_pointer().
42
43config HAVE_FTRACE_MCOUNT_RECORD
44	bool
45	help
46	  See Documentation/trace/ftrace-design.rst
47
48config HAVE_SYSCALL_TRACEPOINTS
49	bool
50	help
51	  See Documentation/trace/ftrace-design.rst
52
53config HAVE_FENTRY
54	bool
55	help
56	  Arch supports the gcc options -pg with -mfentry
57
58config HAVE_NOP_MCOUNT
59	bool
60	help
61	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount
62
63config HAVE_OBJTOOL_MCOUNT
64	bool
65	help
66	  Arch supports objtool --mcount
67
68config HAVE_C_RECORDMCOUNT
69	bool
70	help
71	  C version of recordmcount available?
72
73config TRACER_MAX_TRACE
74	bool
75
76config TRACE_CLOCK
77	bool
78
79config RING_BUFFER
80	bool
81	select TRACE_CLOCK
82	select IRQ_WORK
83
84config EVENT_TRACING
85	select CONTEXT_SWITCH_TRACER
86	select GLOB
87	bool
88
89config CONTEXT_SWITCH_TRACER
90	bool
91
92config RING_BUFFER_ALLOW_SWAP
93	bool
94	help
95	 Allow the use of ring_buffer_swap_cpu.
96	 Adds a very slight overhead to tracing when enabled.
97
98config PREEMPTIRQ_TRACEPOINTS
99	bool
100	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
101	select TRACING
102	default y
103	help
104	  Create preempt/irq toggle tracepoints if needed, so that other parts
105	  of the kernel can use them to generate events or attach hooks to them.
106
107# All tracer options should select GENERIC_TRACER. Options that are
108# enabled by all tracers (context switch and event tracer) select TRACING instead.
109# This allows those options to appear when no other tracer is selected, but
110# to stay hidden when something else selects them. We need the two options
111# GENERIC_TRACER and TRACING to avoid circular dependencies while accomplishing
112# this hiding of the automatic options.
113
114config TRACING
115	bool
116	select RING_BUFFER
117	select STACKTRACE if STACKTRACE_SUPPORT
118	select TRACEPOINTS
119	select NOP_TRACER
120	select BINARY_PRINTF
121	select EVENT_TRACING
122	select TRACE_CLOCK
123
124config GENERIC_TRACER
125	bool
126	select TRACING
127
128#
129# Minimum requirements an architecture has to meet for us to
130# be able to offer generic tracing facilities:
131#
132config TRACING_SUPPORT
133	bool
134	depends on TRACE_IRQFLAGS_SUPPORT
135	depends on STACKTRACE_SUPPORT
136	default y
137
138menuconfig FTRACE
139	bool "Tracers"
140	depends on TRACING_SUPPORT
141	default y if DEBUG_KERNEL
142	help
143	  Enable the kernel tracing infrastructure.
144
145if FTRACE
146
147config BOOTTIME_TRACING
148	bool "Boot-time Tracing support"
149	depends on TRACING
150	select BOOT_CONFIG
151	help
152	  Enable developers to set up the ftrace subsystem via a supplemental
153	  kernel cmdline (bootconfig) at boot time, for debugging (tracing)
154	  driver initialization and the boot process.
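
	  A minimal bootconfig fragment might look like the following sketch
	  (illustrative only; see Documentation/trace/boottime-trace.rst for
	  the exact keys and syntax):

	      ftrace.tracer = function_graph
	      ftrace.event.sched.sched_switch.enable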
155
156config FUNCTION_TRACER
157	bool "Kernel Function Tracer"
158	depends on HAVE_FUNCTION_TRACER
159	select KALLSYMS
160	select GENERIC_TRACER
161	select CONTEXT_SWITCH_TRACER
162	select GLOB
163	select TASKS_RCU if PREEMPTION
164	select TASKS_RUDE_RCU
165	help
166	  Enable the kernel to trace every kernel function. This is done
167	  by using a compiler feature to insert a small, 5-byte No-Operation
168	  instruction at the beginning of every kernel function; this NOP
169	  sequence is then dynamically patched into a tracer call when
170	  tracing is enabled by the administrator. If it's runtime disabled
171	  (the bootup default), then the overhead of the instructions is very
172	  small and not measurable even in micro-benchmarks.
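
	  A minimal usage sketch (assuming tracefs is mounted at
	  /sys/kernel/debug/tracing, as in the other examples in this file):

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace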
173
174config FUNCTION_GRAPH_TRACER
175	bool "Kernel Function Graph Tracer"
176	depends on HAVE_FUNCTION_GRAPH_TRACER
177	depends on FUNCTION_TRACER
178	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
179	default y
180	help
181	  Enable the kernel to trace a function at both its entry
182	  and its return.
183	  Its first purpose is to trace the duration of functions and
184	  draw a call graph for each thread with some information like
185	  the return value. This is done by pushing the current return
186	  address onto a stack of calls kept in the current task structure.
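
	  For example (illustrative; assumes tracefs is mounted at
	  /sys/kernel/debug/tracing):

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace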
187
188config DYNAMIC_FTRACE
189	bool "enable/disable function tracing dynamically"
190	depends on FUNCTION_TRACER
191	depends on HAVE_DYNAMIC_FTRACE
192	default y
193	help
194	  This option will modify all the calls to function tracing
195	  dynamically (will patch them out of the binary image and
196	  replace them with a No-Op instruction) on boot up. At
197	  compile time, a table is made of all the locations that ftrace
198	  can trace, and this table is linked into the kernel
199	  image. When this is enabled, functions can be individually
200	  enabled, and the functions not enabled will not affect
201	  performance of the system.
202
203	  See the files in /sys/kernel/debug/tracing:
204	    available_filter_functions
205	    set_ftrace_filter
206	    set_ftrace_notrace
207
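	  For example, to limit tracing to functions matching a glob
	  (the pattern here is only an illustration):

	      echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
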
208	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
209	  otherwise has native performance as long as no tracing is active.
210
211config DYNAMIC_FTRACE_WITH_REGS
212	def_bool y
213	depends on DYNAMIC_FTRACE
214	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
215
216config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
217	def_bool y
218	depends on DYNAMIC_FTRACE_WITH_REGS
219	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
220
221config DYNAMIC_FTRACE_WITH_ARGS
222	def_bool y
223	depends on DYNAMIC_FTRACE
224	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
225
226config FUNCTION_PROFILER
227	bool "Kernel function profiler"
228	depends on FUNCTION_TRACER
229	default n
230	help
231	  This option enables the kernel function profiler. A file is created
232	  in debugfs called function_profile_enabled which defaults to zero.
233	  When a 1 is echoed into this file, profiling begins, and when a
234	  zero is entered, profiling stops. A "functions" file is created in
235	  the trace_stat directory; this file shows the list of functions that
236	  have been hit and their counters.
237
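	  For example (illustrative; the exact trace_stat file names can vary
	  by kernel version and CPU count):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      # ... run the workload of interest ...
	      echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function*
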
238	  If in doubt, say N.
239
240config STACK_TRACER
241	bool "Trace max stack"
242	depends on HAVE_FUNCTION_TRACER
243	select FUNCTION_TRACER
244	select STACKTRACE
245	select KALLSYMS
246	help
247	  This special tracer records the maximum stack footprint of the
248	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
249
250	  This tracer works by hooking into every function call that the
251	  kernel executes, and keeping a maximum stack depth value and
252	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
253	  then it will not have any overhead while the stack tracer
254	  is disabled.
255
256	  To enable the stack tracer on bootup, pass in 'stacktrace'
257	  on the kernel command line.
258
259	  The stack tracer can also be enabled or disabled via the
260	  sysctl kernel.stack_tracer_enabled
261
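	  For example (illustrative):

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace
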
262	  Say N if unsure.
263
264config TRACE_PREEMPT_TOGGLE
265	bool
266	help
267	  Enables hooks which will be called when preemption is first disabled,
268	  and last enabled.
269
270config IRQSOFF_TRACER
271	bool "Interrupts-off Latency Tracer"
272	default n
273	depends on TRACE_IRQFLAGS_SUPPORT
274	select TRACE_IRQFLAGS
275	select GENERIC_TRACER
276	select TRACER_MAX_TRACE
277	select RING_BUFFER_ALLOW_SWAP
278	select TRACER_SNAPSHOT
279	select TRACER_SNAPSHOT_PER_CPU_SWAP
280	help
281	  This option measures the time spent in irqs-off critical
282	  sections, with microsecond accuracy.
283
284	  The default measurement method is a maximum search, which is
285	  disabled by default and can be runtime (re-)started
286	  via:
287
288	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
289
290	  (Note that kernel size and overhead increase with this option
291	  enabled. This option and the preempt-off timing option can be
292	  used together or separately.)
293
294config PREEMPT_TRACER
295	bool "Preemption-off Latency Tracer"
296	default n
297	depends on PREEMPTION
298	select GENERIC_TRACER
299	select TRACER_MAX_TRACE
300	select RING_BUFFER_ALLOW_SWAP
301	select TRACER_SNAPSHOT
302	select TRACER_SNAPSHOT_PER_CPU_SWAP
303	select TRACE_PREEMPT_TOGGLE
304	help
305	  This option measures the time spent in preemption-off critical
306	  sections, with microsecond accuracy.
307
308	  The default measurement method is a maximum search, which is
309	  disabled by default and can be runtime (re-)started
310	  via:
311
312	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
313
314	  (Note that kernel size and overhead increase with this option
315	  enabled. This option and the irqs-off timing option can be
316	  used together or separately.)
317
318config SCHED_TRACER
319	bool "Scheduling Latency Tracer"
320	select GENERIC_TRACER
321	select CONTEXT_SWITCH_TRACER
322	select TRACER_MAX_TRACE
323	select TRACER_SNAPSHOT
324	help
325	  This tracer tracks the latency of the highest priority task
326	  to be scheduled in, starting from the point it has woken up.
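
	  For example, to use the wakeup tracer that this option provides
	  (illustrative):

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency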
327
328config HWLAT_TRACER
329	bool "Tracer to detect hardware latencies (like SMIs)"
330	select GENERIC_TRACER
331	help
332	 This tracer, when enabled, will create one or more kernel threads,
333	 depending on what the cpumask file is set to, with each thread
334	 spinning in a loop looking for interruptions caused by
335	 something other than the kernel. For example, if a
336	 System Management Interrupt (SMI) takes a noticeable amount of
337	 time, this tracer will detect it. This is useful for testing
338	 if a system is reliable for Real Time tasks.
339
340	 Some files are created in the tracing directory when this
341	 is enabled:
342
343	   hwlat_detector/width   - time in usecs for how long to spin for
344	   hwlat_detector/window  - time in usecs between the start of each
345				     iteration
346
347	 A kernel thread is created that will spin with interrupts disabled
348	 for "width" microseconds in every "window" cycle. It will then not
349	 spin for the remaining "window - width" microseconds, during which
350	 the system can continue to operate.
351
352	 The output will appear in the trace and trace_pipe files.
353
354	 When the tracer is not running, it has no effect on the system,
355	 but when it is running, it can cause the system to be
356	 periodically non-responsive. Do not run this tracer on a
357	 production system.
358
359	 To enable this tracer, echo "hwlat" into the current_tracer
360	 file. Every time a latency is greater than tracing_thresh, it will
361	 be recorded into the ring buffer.
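
	 For example (the threshold is in microseconds and is only an
	 illustration):

	     echo hwlat > /sys/kernel/debug/tracing/current_tracer
	     echo 10 > /sys/kernel/debug/tracing/tracing_thresh
	     cat /sys/kernel/debug/tracing/trace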
362
363config OSNOISE_TRACER
364	bool "OS Noise tracer"
365	select GENERIC_TRACER
366	help
367	  In the context of high-performance computing (HPC), the Operating
368	  System Noise (osnoise) refers to the interference experienced by an
369	  application due to activities inside the operating system. In the
370	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
371	  can cause noise to the system. Moreover, hardware-related jobs can
372	  also cause noise, for example, via SMIs.
373
374	  The osnoise tracer leverages the hwlat_detector by running a similar
375	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
376	  the sources of osnoise to occur during its execution. The osnoise tracer takes
377	  note of the entry and exit point of any source of interferences,
378	  increasing a per-cpu interference counter. It saves an interference
379	  counter for each source of interference. The interference counter for
380	  NMI, IRQs, SoftIRQs, and threads is increased any time the tool
381	  observes these interferences' entry events. When noise happens
382	  without any interference from the operating system level, the
383	  hardware noise counter increases, pointing to a hardware-related
384	  noise. In this way, osnoise can account for any source of
385	  interference. At the end of the period, the osnoise tracer prints
386	  the sum of all noise, the max single noise, the percentage of CPU
387	  available for the thread, and the counters for the noise sources.
388
389	  In addition to the tracer, a set of tracepoints was added to
390	  facilitate the identification of the osnoise source.
391
392	  The output will appear in the trace and trace_pipe files.
393
394	  To enable this tracer, echo "osnoise" into the current_tracer
395	  file.
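
	  For example (illustrative):

	      echo osnoise > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace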
396
397config TIMERLAT_TRACER
398	bool "Timerlat tracer"
399	select OSNOISE_TRACER
400	select GENERIC_TRACER
401	help
402	  The timerlat tracer aims to help preemptive kernel developers
403	  find sources of wakeup latencies of real-time threads.
404
405	  The tracer creates a per-cpu kernel thread with real-time priority.
406	  The tracer thread sets a periodic timer to wake itself up, and goes
407	  to sleep waiting for the timer to fire. At the wakeup, the thread
408	  then computes a wakeup latency value as the difference between
409	  the current time and the absolute time that the timer was set
410	  to expire.
411
412	  The tracer prints two lines at every activation. The first is the
413	  timer latency observed at the hardirq context before the
414	  activation of the thread. The second is the timer latency observed
415	  by the thread, which is the same level that cyclictest reports. The
416	  ACTIVATION ID field serves to relate the irq execution to its
417	  respective thread execution.
418
419	  The tracer is built on top of the osnoise tracer, and the osnoise:
420	  events can be used to trace the source of interference from NMI,
421	  IRQs and other threads. It also enables the capture of the
422	  stacktrace at the IRQ context, which helps to identify the code
423	  path that can cause thread delay.
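
	  For example (illustrative):

	      echo timerlat > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace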
424
425config MMIOTRACE
426	bool "Memory mapped IO tracing"
427	depends on HAVE_MMIOTRACE_SUPPORT && PCI
428	select GENERIC_TRACER
429	help
430	  Mmiotrace traces Memory Mapped I/O access and is meant for
431	  debugging and reverse engineering. It is called from the ioremap
432	  implementation and works via page faults. Tracing is disabled by
433	  default and can be enabled at run-time.
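
	  A typical session looks roughly like this (sketch; see the document
	  below for the full procedure):

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
	      # ... exercise the driver being investigated ...
	      echo nop > /sys/kernel/debug/tracing/current_tracer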
434
435	  See Documentation/trace/mmiotrace.rst.
436	  If you are not helping to develop drivers, say N.
437
438config ENABLE_DEFAULT_TRACERS
439	bool "Trace process context switches and events"
440	depends on !GENERIC_TRACER
441	select TRACING
442	help
443	  This tracer hooks into various tracepoints in the kernel,
444	  allowing the user to pick and choose which tracepoints they
445	  want to trace. It also includes the sched_switch tracer plugin.
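
	  For example, to enable a single event (illustrative):

	      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	      cat /sys/kernel/debug/tracing/trace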
446
447config FTRACE_SYSCALLS
448	bool "Trace syscalls"
449	depends on HAVE_SYSCALL_TRACEPOINTS
450	select GENERIC_TRACER
451	select KALLSYMS
452	help
453	  Basic tracer to catch the syscall entry and exit events.
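
	  For example, to enable all syscall events (illustrative):

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	      cat /sys/kernel/debug/tracing/trace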
454
455config TRACER_SNAPSHOT
456	bool "Create a snapshot trace buffer"
457	select TRACER_MAX_TRACE
458	help
459	  Allow tracing users to take a snapshot of the current buffer using the
460	  ftrace interface, e.g.:
461
462	      echo 1 > /sys/kernel/debug/tracing/snapshot
463	      cat snapshot
464
465config TRACER_SNAPSHOT_PER_CPU_SWAP
466	bool "Allow snapshot to swap per CPU"
467	depends on TRACER_SNAPSHOT
468	select RING_BUFFER_ALLOW_SWAP
469	help
470	  Allow doing a snapshot of a single CPU buffer instead of a
471	  full swap (all buffers). If this is set, then the following is
472	  allowed:
473
474	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
475
476	  After which, only the tracing buffer for CPU 2 is swapped with
477	  the main tracing buffer, and the other CPU buffers remain the same.
478
479	  When this is enabled, it adds a little more overhead to the
480	  trace recording, as it needs to add some checks to synchronize
481	  recording with swaps. But this does not affect the performance
482	  of the overall system. This is enabled by default when the preempt
483	  or irq latency tracers are enabled, as those need to swap as well
484	  and already add the overhead (plus a lot more).
485
486config TRACE_BRANCH_PROFILING
487	bool
488	select GENERIC_TRACER
489
490choice
491	prompt "Branch Profiling"
492	default BRANCH_PROFILE_NONE
493	help
494	 Branch profiling is a software profiler. It will add hooks
495	 into the C conditionals to test which path a branch takes.
496
497	 The likely/unlikely profiler only looks at the conditions that
498	 are annotated with a likely or unlikely macro.
499
500	 The "all branch" profiler will profile every if-statement in the
501	 kernel. This profiler will also enable the likely/unlikely
502	 profiler.
503
504	 Either of the above profilers adds a bit of overhead to the system.
505	 If unsure, choose "No branch profiling".
506
507config BRANCH_PROFILE_NONE
508	bool "No branch profiling"
509	help
510	  No branch profiling. Branch profiling adds a bit of overhead.
511	  Only enable it if you want to analyse the branching behavior.
512	  Otherwise keep it disabled.
513
514config PROFILE_ANNOTATED_BRANCHES
515	bool "Trace likely/unlikely profiler"
516	select TRACE_BRANCH_PROFILING
517	help
518	  This tracer profiles all likely and unlikely macros
519	  in the kernel. It will display the results in:
520
521	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
522
523	  Note: this will add a significant overhead; only turn this
524	  on if you need to profile the system's use of these macros.
525
526config PROFILE_ALL_BRANCHES
527	bool "Profile all if conditionals" if !FORTIFY_SOURCE
528	select TRACE_BRANCH_PROFILING
529	help
530	  This tracer profiles all branch conditions. Every if ()
531	  taken in the kernel is recorded, whether it was hit or missed.
532	  The results will be displayed in:
533
534	  /sys/kernel/debug/tracing/trace_stat/branch_all
535
536	  This option also enables the likely/unlikely profiler.
537
538	  This configuration, when enabled, will impose a significant overhead
539	  on the system. This should only be enabled when the system
540	  is to be analyzed in great detail.
541endchoice
542
543config TRACING_BRANCHES
544	bool
545	help
546	  Selected by tracers that will trace the likely and unlikely
547	  conditions. This prevents the tracers themselves from being
548	  profiled. Profiling the tracing infrastructure can only happen
549	  when the likelys and unlikelys are not being traced.
550
551config BRANCH_TRACER
552	bool "Trace likely/unlikely instances"
553	depends on TRACE_BRANCH_PROFILING
554	select TRACING_BRANCHES
555	help
556	  This traces the events of likely and unlikely condition
557	  calls in the kernel.  The difference between this and the
558	  "Trace likely/unlikely profiler" is that this is not a
559	  histogram of the callers, but actually places the calling
560	  events into a running trace buffer to see when and where the
561	  events happened, as well as their results.
562
563	  Say N if unsure.
564
565config BLK_DEV_IO_TRACE
566	bool "Support for tracing block IO actions"
567	depends on SYSFS
568	depends on BLOCK
569	select RELAY
570	select DEBUG_FS
571	select TRACEPOINTS
572	select GENERIC_TRACER
573	select STACKTRACE
574	help
575	  Say Y here if you want to be able to trace the block layer actions
576	  on a given queue. Tracing allows you to see any traffic happening
577	  on a block device queue. For more information (and the userspace
578	  support tools needed), fetch the blktrace tools from:
579
580	  git://git.kernel.dk/blktrace.git
581
582	  Tracing also is possible using the ftrace interface, e.g.:
583
584	    echo 1 > /sys/block/sda/sda1/trace/enable
585	    echo blk > /sys/kernel/debug/tracing/current_tracer
586	    cat /sys/kernel/debug/tracing/trace_pipe
587
588	  If unsure, say N.
589
590config KPROBE_EVENTS
591	depends on KPROBES
592	depends on HAVE_REGS_AND_STACK_ACCESS_API
593	bool "Enable kprobes-based dynamic events"
594	select TRACING
595	select PROBE_EVENTS
596	select DYNAMIC_EVENTS
597	default y
598	help
599	  This allows the user to add tracing events (similar to tracepoints)
600	  on the fly via the ftrace interface. See
601	  Documentation/trace/kprobetrace.rst for more details.
602
603	  Those events can be inserted wherever kprobes can probe, and record
604	  various register and memory values.
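
	  A minimal sketch (the probed symbol and event name are only
	  illustrations; see the document above for the full syntax):

	      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
	      cat /sys/kernel/debug/tracing/trace
	      echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events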
605
606	  This option is also required by perf-probe subcommand of perf tools.
607	  If you want to use perf tools, this option is strongly recommended.
608
609config KPROBE_EVENTS_ON_NOTRACE
610	bool "Do NOT protect notrace functions from kprobe events"
611	depends on KPROBE_EVENTS
612	depends on DYNAMIC_FTRACE
613	default n
614	help
615	  This is only for the developers who want to debug ftrace itself
616	  using kprobe events.
617
618	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
619	  functions are protected from kprobe events to prevent infinite
620	  recursion or any unexpected execution path which could lead to a
621	  kernel crash.
622
623	  This option disables such protection and allows you to put kprobe
624	  events on ftrace functions for debugging ftrace by itself.
625	  Note that this might let you shoot yourself in the foot.
626
627	  If unsure, say N.
628
629config UPROBE_EVENTS
630	bool "Enable uprobes-based dynamic events"
631	depends on ARCH_SUPPORTS_UPROBES
632	depends on MMU
633	depends on PERF_EVENTS
634	select UPROBES
635	select PROBE_EVENTS
636	select DYNAMIC_EVENTS
637	select TRACING
638	default y
639	help
640	  This allows the user to add tracing events on top of userspace
641	  dynamic events (similar to tracepoints) on the fly via the trace
642	  events interface. Those events can be inserted wherever uprobes
643	  can probe, and record various registers.
644	  This option is required if you plan to use perf-probe subcommand
645	  of perf tools on user space applications.
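
	  A minimal sketch (the binary path and offset below are hypothetical
	  placeholders; see Documentation/trace/uprobetracer.rst for the syntax):

	      echo 'p:myuprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable
	      cat /sys/kernel/debug/tracing/trace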
646
647config BPF_EVENTS
648	depends on BPF_SYSCALL
649	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
650	bool
651	default y
652	help
653	  This allows the user to attach BPF programs to kprobe, uprobe, and
654	  tracepoint events.
655
656config DYNAMIC_EVENTS
657	def_bool n
658
659config PROBE_EVENTS
660	def_bool n
661
662config BPF_KPROBE_OVERRIDE
663	bool "Enable BPF programs to override a kprobed function"
664	depends on BPF_EVENTS
665	depends on FUNCTION_ERROR_INJECTION
666	default n
667	help
668	 Allows BPF to override the execution of a probed function and
669	 set a different return value.  This is used for error injection.
670
671config FTRACE_MCOUNT_RECORD
672	def_bool y
673	depends on DYNAMIC_FTRACE
674	depends on HAVE_FTRACE_MCOUNT_RECORD
675
676config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
677	bool
678	depends on FTRACE_MCOUNT_RECORD
679
680config FTRACE_MCOUNT_USE_CC
681	def_bool y
682	depends on $(cc-option,-mrecord-mcount)
683	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
684	depends on FTRACE_MCOUNT_RECORD
685
686config FTRACE_MCOUNT_USE_OBJTOOL
687	def_bool y
688	depends on HAVE_OBJTOOL_MCOUNT
689	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
690	depends on !FTRACE_MCOUNT_USE_CC
691	depends on FTRACE_MCOUNT_RECORD
692
693config FTRACE_MCOUNT_USE_RECORDMCOUNT
694	def_bool y
695	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
696	depends on !FTRACE_MCOUNT_USE_CC
697	depends on !FTRACE_MCOUNT_USE_OBJTOOL
698	depends on FTRACE_MCOUNT_RECORD
699
700config TRACING_MAP
701	bool
702	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
703	help
704	  tracing_map is a special-purpose lock-free map for tracing,
705	  separated out as a stand-alone facility in order to allow it
706	  to be shared between multiple tracers.  It isn't meant to be
707	  generally used outside of that context, and is normally
708	  selected by tracers that use it.
709
710config SYNTH_EVENTS
711	bool "Synthetic trace events"
712	select TRACING
713	select DYNAMIC_EVENTS
714	default n
715	help
716	  Synthetic events are user-defined trace events that can be
717	  used to combine data from other trace events or in fact any
718	  data source.  Synthetic events can be generated indirectly
719	  via the trace() action of histogram triggers or directly
720	  by way of an in-kernel API.
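
	  For example, a synthetic event with two fields can be defined from
	  user space like this (the event and field names are only an
	  illustration):

	      echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/debug/tracing/synthetic_events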
721
722	  See Documentation/trace/events.rst or
723	  Documentation/trace/histogram.rst for details and examples.
724
725	  If in doubt, say N.
726
727config HIST_TRIGGERS
728	bool "Histogram triggers"
729	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
730	select TRACING_MAP
731	select TRACING
732	select DYNAMIC_EVENTS
733	select SYNTH_EVENTS
734	default n
735	help
736	  Hist triggers allow one or more arbitrary trace event fields
737	  to be aggregated into hash tables and dumped to stdout by
738	  reading a debugfs/tracefs file.  They're useful for
739	  gathering quick and dirty (though precise) summaries of
740	  event activity as an initial guide for further investigation
741	  using more advanced tools.
742
743	  Inter-event tracing of quantities such as latencies is also
744	  supported using hist triggers under this option.
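
	  For example, to aggregate kmalloc request sizes by call site
	  (taken as an illustration from the histogram documentation):

	      echo 'hist:key=call_site:val=bytes_req' >> /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist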
745
746	  See Documentation/trace/histogram.rst.
747	  If in doubt, say N.
748
749config TRACE_EVENT_INJECT
750	bool "Trace event injection"
751	depends on TRACING
752	help
753	  Allow user-space to inject a specific trace event into the ring
754	  buffer. This is mainly used for testing purposes.
755
756	  If unsure, say N.
757
758config TRACEPOINT_BENCHMARK
759	bool "Add tracepoint that benchmarks tracepoints"
760	help
761	 This option creates the tracepoint "benchmark:benchmark_event".
762	 When the tracepoint is enabled, it kicks off a kernel thread that
763	 goes into an infinite loop (calling cond_resched() to let other tasks
764	 run), and calls the tracepoint. Each iteration will record the time
765	 it took to write to the tracepoint, and on the next iteration that
766	 data will be passed to the tracepoint itself. That is, the tracepoint
767	 will report the time it took to do the previous tracepoint.
768	 The string written to the tracepoint is a static string of 128 bytes
769	 to keep the time the same. The initial string is simply a write of
770	 "START". The second string records the cold cache time of the first
771	 write which is not added to the rest of the calculations.
772
773	 As it is a tight loop, it benchmarks as hot cache. That's fine because
774	 we care most about hot paths that are probably in cache already.
775
776	 An example of the output:
777
778	      START
779	      first=3672 [COLD CACHED]
780	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
781	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
782	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
783	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
784	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
785	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
786
787
788config RING_BUFFER_BENCHMARK
789	tristate "Ring buffer benchmark stress tester"
790	depends on RING_BUFFER
791	help
792	  This option creates a test to stress the ring buffer and benchmark it.
793	  It creates its own ring buffer such that it will not interfere with
794	  any other users of the ring buffer (such as ftrace). It then creates
795	  a producer and consumer that will run for 10 seconds and sleep for
796	  10 seconds. Each interval it will print out the number of events
797	  it recorded and give a rough estimate of how long each iteration took.
798
799	  It does not disable interrupts or raise its priority, so it may be
800	  affected by processes that are running.
801
802	  If unsure, say N.
803
804config TRACE_EVAL_MAP_FILE
805	bool "Show eval mappings for trace events"
806	depends on TRACING
807	help
808	The "print fmt" of the trace events will show the enum/sizeof names
809	instead of their values. This can cause problems for user space tools
810	that use this string to parse the raw data as user space does not know
811	how to convert the string to its value.
812
813	To fix this, there's a special macro in the kernel that can be used
814	to convert an enum/sizeof into its value. If this macro is used, then
815	the print fmt strings will be converted to their values.
816
817	If something does not get converted properly, this option can be
818	used to show what enums/sizeof the kernel tried to convert.
819
820	This option is for debugging the conversions. A file is created
821	in the tracing directory called "eval_map" that will show the
822	names matched with their values and what trace event system they
823	belong to.
824
825	Normally, the mapping of the strings to values will be freed after
826	boot up or module load. With this option, they will not be freed, as
827	they are needed for the "eval_map" file. Enabling this option will
828	increase the memory footprint of the running kernel.
829
830	If unsure, say N.
831
832config FTRACE_RECORD_RECURSION
833	bool "Record functions that recurse in function tracing"
834	depends on FUNCTION_TRACER
835	help
836	  All callbacks that attach to function tracing have some sort
837	  of protection against recursion. Even though the protection exists,
838	  it adds overhead. This option will create a file in the tracefs
839	  file system called "recursed_functions" that will list the functions
840	  that triggered a recursion.
841
842	  This will add more overhead to cases that have recursion.
843
844	  If unsure, say N
845
846config FTRACE_RECORD_RECURSION_SIZE
847	int "Max number of recursed functions to record"
848	default	128
849	depends on FTRACE_RECORD_RECURSION
850	help
851	  This defines the limit on the number of functions that can be
852	  listed in the "recursed_functions" file, which lists all
853	  the functions that caused a recursion to happen.
854	  This file can be reset, but the limit cannot be changed
855	  at runtime.
856
857config RING_BUFFER_RECORD_RECURSION
858	bool "Record functions that recurse in the ring buffer"
859	depends on FTRACE_RECORD_RECURSION
860	# default y, because it is coupled with FTRACE_RECORD_RECURSION
861	default y
862	help
863	  The ring buffer has its own internal recursion. Although when
864	  recursion happens it won't cause harm because of the protection,
865	  it does cause unwanted overhead. Enabling this option will
866	  record where recursion was detected in the ftrace "recursed_functions"
867	  file.
868
869	  This will add more overhead to cases that have recursion.
870
871config GCOV_PROFILE_FTRACE
872	bool "Enable GCOV profiling on ftrace subsystem"
873	depends on GCOV_KERNEL
874	help
875	  Enable GCOV profiling on the ftrace subsystem for checking
876	  which functions/lines are tested.
877
878	  If unsure, say N.
879
880	  Note that on a kernel compiled with this config, ftrace will
881	  run significantly slower.
882
883config FTRACE_SELFTEST
884	bool
885
886config FTRACE_STARTUP_TEST
887	bool "Perform a startup test on ftrace"
888	depends on GENERIC_TRACER
889	select FTRACE_SELFTEST
890	help
891	  This option performs a series of startup tests on ftrace. On bootup
892	  a series of tests are made to verify that the tracer is
893	  functioning properly. It will do tests on all the configured
894	  tracers of ftrace.
895
896config EVENT_TRACE_STARTUP_TEST
897	bool "Run selftest on trace events"
898	depends on FTRACE_STARTUP_TEST
899	default y
900	help
901	  This option performs a test on all trace events in the system.
902	  It basically just enables each event and runs some code that
903	  will trigger events (not necessarily the event it enables).
904	  This may take some time to run as there are a lot of events.
905
906config EVENT_TRACE_TEST_SYSCALLS
907	bool "Run selftest on syscall events"
908	depends on EVENT_TRACE_STARTUP_TEST
909	help
910	 This option will also enable testing every syscall event.
911	 It enables each event, runs various loads with the event enabled,
912	 and then disables it. This adds a bit more time to kernel boot
913	 up since it runs this on every system call defined.
914
915	 TBD - enable a way to actually call the syscalls as we test their
916	       events
917
918config RING_BUFFER_STARTUP_TEST
919	bool "Ring buffer startup self test"
920	depends on RING_BUFFER
921	help
922	 Run a simple self test on the ring buffer on boot up. Late in the
923	 kernel boot sequence, the test will start, kicking off
924	 a thread per cpu. Each thread will write various size events
925	 into the ring buffer. Another thread is created to send IPIs
926	 to each of the threads, where the IPI handler will also write
927	 to the ring buffer, to test/stress the nesting ability.
928	 If any anomalies are discovered, a warning will be displayed
929	 and all ring buffers will be disabled.
930
931	 The test runs for 10 seconds. This will slow your boot time
932	 by at least 10 more seconds.
933
934	 At the end of the test, statistics and more checks are done.
935	 It will output the stats of each per-cpu buffer: what
936	 was written, the sizes, what was read, what was lost, and
937	 other similar details.
938
939	 If unsure, say N
940
941config RING_BUFFER_VALIDATE_TIME_DELTAS
942	bool "Verify ring buffer time stamp deltas"
943	depends on RING_BUFFER
944	help
945	  This will audit the time stamps on the ring buffer sub
946	  buffer to make sure that all the time deltas for the
947	  events on a sub buffer match the current time stamp.
948	  This audit is performed for every event that is not
949	  interrupted by, nor interrupting, another event. A check
950	  is also made when traversing sub buffers to make sure
951	  that all the deltas on the previous sub buffer do not
952	  add up to be greater than the current time stamp.
953
954	  NOTE: This adds significant overhead to recording of events,
955	  and should only be used to test the logic of the ring buffer.
956	  Do not use it on production systems.
957
958	  Only say Y if you understand what this does, and you
959	  still want it enabled. Otherwise say N.
960
961config MMIOTRACE_TEST
962	tristate "Test module for mmiotrace"
963	depends on MMIOTRACE && m
964	help
965	  This is a dumb module for testing mmiotrace. It is very dangerous
966	  as it will write garbage to IO memory starting at a given address.
967	  However, it should be safe to use on, e.g., an unused portion of VRAM.
968
969	  Say N, unless you absolutely know what you are doing.
970
971config PREEMPTIRQ_DELAY_TEST
972	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
973	depends on m
974	help
975	  Select this option to build a test module that can help test latency
976	  tracers by executing a preempt or irq disable section with a user
977	  configurable delay. The module busy waits for the duration of the
978	  critical section.
979
980	  For example, the following invocation generates a burst of three
981	  irq-disabled critical sections for 500us:
982	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
983
984	  In addition, if you want to run the test on the cpu that the latency
985	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
986	  command.
987
988	  If unsure, say N
989
990config SYNTH_EVENT_GEN_TEST
991	tristate "Test module for in-kernel synthetic event generation"
992	depends on SYNTH_EVENTS
993	help
994	  This option creates a test module to check the base
995	  functionality of in-kernel synthetic event definition and
996	  generation.
997
998	  To test, insert the module, and then check the trace buffer
999	  for the generated sample events.
1000
1001	  If unsure, say N.
1002
1003config KPROBE_EVENT_GEN_TEST
1004	tristate "Test module for in-kernel kprobe event generation"
1005	depends on KPROBE_EVENTS
1006	help
1007	  This option creates a test module to check the base
1008	  functionality of in-kernel kprobe event definition.
1009
1010	  To test, insert the module, and then check the trace buffer
1011	  for the generated kprobe events.
1012
1013	  If unsure, say N.
1014
1015config HIST_TRIGGERS_DEBUG
1016	bool "Hist trigger debug support"
1017	depends on HIST_TRIGGERS
1018	help
1019	  Add a "hist_debug" file for each event, which when read will
1020	  dump out a bunch of internal details about the hist triggers
1021	  defined on that event.
1022
1023	  The hist_debug file serves a couple of purposes:
1024
1025	    - Helps developers verify that nothing is broken.
1026
1027	    - Provides educational information to support the details
1028	      of the hist trigger internals as described in
1029	      Documentation/trace/histogram-design.rst.
1030
1031	  The hist_debug output only covers the data structures
1032	  related to the histogram definitions themselves and doesn't
1033	  display the internals of map buckets or variable values of
1034	  running histograms.
1035
1036	  If unsure, say N.
1037
1038endif # FTRACE
1039