1# SPDX-License-Identifier: GPL-2.0-only
2#
3# Architectures that offer a FUNCTION_TRACER implementation should
4#  select HAVE_FUNCTION_TRACER:
5#
6
7config USER_STACKTRACE_SUPPORT
8	bool
9
10config NOP_TRACER
11	bool
12
13config HAVE_RETHOOK
14	bool
15
16config RETHOOK
17	bool
18	depends on HAVE_RETHOOK
19	help
20	  Enable generic return hooking feature. This is an internal
21	  API, which will be used by other function-entry hooking
22	  features like fprobe and kprobes.
23
24config HAVE_FUNCTION_TRACER
25	bool
26	help
27	  See Documentation/trace/ftrace-design.rst
28
29config HAVE_FUNCTION_GRAPH_TRACER
30	bool
31	help
32	  See Documentation/trace/ftrace-design.rst
33
34config HAVE_DYNAMIC_FTRACE
35	bool
36	help
37	  See Documentation/trace/ftrace-design.rst
38
39config HAVE_DYNAMIC_FTRACE_WITH_REGS
40	bool
41
42config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
43	bool
44
45config HAVE_DYNAMIC_FTRACE_WITH_ARGS
46	bool
47	help
48	 If this is set, then arguments and stack can be found from
49	 the pt_regs passed into the function callback regs parameter
50	 by default, even without setting the REGS flag in the ftrace_ops.
51	 This allows for use of regs_get_kernel_argument() and
52	 kernel_stack_pointer().
53
54config HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
55	bool
56	help
57	  An architecture selects this if it generates __patchable_function_entries
58	  sections but does not want them included in the ftrace locations.
59
60config HAVE_FTRACE_MCOUNT_RECORD
61	bool
62	help
63	  See Documentation/trace/ftrace-design.rst
64
65config HAVE_SYSCALL_TRACEPOINTS
66	bool
67	help
68	  See Documentation/trace/ftrace-design.rst
69
70config HAVE_FENTRY
71	bool
72	help
73	  Arch supports the gcc options -pg with -mfentry
74
75config HAVE_NOP_MCOUNT
76	bool
77	help
78	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount
79
80config HAVE_OBJTOOL_MCOUNT
81	bool
82	help
83	  Arch supports objtool --mcount
84
85config HAVE_C_RECORDMCOUNT
86	bool
87	help
88	  The architecture can use the C version of recordmcount.
89
90config HAVE_BUILDTIME_MCOUNT_SORT
91       bool
92       help
93         An architecture selects this if it sorts the mcount_loc section
94         at build time.
95
96config BUILDTIME_MCOUNT_SORT
97       bool
98       default y
99       depends on HAVE_BUILDTIME_MCOUNT_SORT && DYNAMIC_FTRACE
100       help
101         Sort the mcount_loc section at build time.
102
103config TRACER_MAX_TRACE
104	bool
105
106config TRACE_CLOCK
107	bool
108
109config RING_BUFFER
110	bool
111	select TRACE_CLOCK
112	select IRQ_WORK
113
114config EVENT_TRACING
115	select CONTEXT_SWITCH_TRACER
116	select GLOB
117	bool
118
119config CONTEXT_SWITCH_TRACER
120	bool
121
122config RING_BUFFER_ALLOW_SWAP
123	bool
124	help
125	 Allow the use of ring_buffer_swap_cpu.
126	 Adds a very slight overhead to tracing when enabled.
127
128config PREEMPTIRQ_TRACEPOINTS
129	bool
130	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
131	select TRACING
132	default y
133	help
134	  Create preempt/irq toggle tracepoints if needed, so that other parts
135	  of the kernel can use them to generate traces or attach hooks to them.
136
137# All tracer options should select GENERIC_TRACER. For those options that are
138# enabled by all tracers (context switch and event tracer) they select TRACING.
139# This allows those options to appear when no other tracer is selected. But the
140# options do not appear when something else selects it. We need the two options
141# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
142# hiding of the automatic options.
143
144config TRACING
145	bool
146	select RING_BUFFER
147	select STACKTRACE if STACKTRACE_SUPPORT
148	select TRACEPOINTS
149	select NOP_TRACER
150	select BINARY_PRINTF
151	select EVENT_TRACING
152	select TRACE_CLOCK
153	select TASKS_RCU if PREEMPTION
154
155config GENERIC_TRACER
156	bool
157	select TRACING
158
159#
160# Minimum requirements an architecture has to meet for us to
161# be able to offer generic tracing facilities:
162#
163config TRACING_SUPPORT
164	bool
165	depends on TRACE_IRQFLAGS_SUPPORT
166	depends on STACKTRACE_SUPPORT
167	default y
168
169menuconfig FTRACE
170	bool "Tracers"
171	depends on TRACING_SUPPORT
172	default y if DEBUG_KERNEL
173	help
174	  Enable the kernel tracing infrastructure.
175
176if FTRACE
177
178config BOOTTIME_TRACING
179	bool "Boot-time Tracing support"
180	depends on TRACING
181	select BOOT_CONFIG
182	help
183	  Enable developers to set up the ftrace subsystem via a supplemental
184	  kernel cmdline at boot time, for debugging (tracing) driver
185	  initialization and the boot process.
186
187config FUNCTION_TRACER
188	bool "Kernel Function Tracer"
189	depends on HAVE_FUNCTION_TRACER
190	select KALLSYMS
191	select GENERIC_TRACER
192	select CONTEXT_SWITCH_TRACER
193	select GLOB
194	select TASKS_RCU if PREEMPTION
195	select TASKS_RUDE_RCU
196	help
197	  Enable the kernel to trace every kernel function. This is done
198	  by using a compiler feature to insert a small, 5-byte No-Operation
199	  instruction at the beginning of every kernel function. This NOP
200	  sequence is then dynamically patched into a tracer call when
201	  tracing is enabled by the administrator. If tracing is disabled at
202	  runtime (the bootup default), the overhead of these instructions is
203	  very small and not measurable even in micro-benchmarks (at least on
204	  x86; other architectures may see some impact).
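
	  As a minimal usage sketch (assuming tracefs is mounted at the
	  default /sys/kernel/debug/tracing location), the function tracer
	  can be selected and read at runtime with:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace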
205
206config FUNCTION_GRAPH_TRACER
207	bool "Kernel Function Graph Tracer"
208	depends on HAVE_FUNCTION_GRAPH_TRACER
209	depends on FUNCTION_TRACER
210	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
211	default y
212	help
213	  Enable the kernel to trace a function at both its entry
214	  and its return.
215	  Its first purpose is to trace the duration of functions and
216	  draw a call graph for each thread with some information like
217	  the return value. This is done by saving the function's return
218	  address in a stack of calls on the current task structure.
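
	  As a usage sketch (same default tracefs path assumptions as the
	  function tracer above):

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace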
219
220config DYNAMIC_FTRACE
221	bool "enable/disable function tracing dynamically"
222	depends on FUNCTION_TRACER
223	depends on HAVE_DYNAMIC_FTRACE
224	default y
225	help
226	  This option will modify all the calls to function tracing
227	  dynamically (will patch them out of the binary image and
228	  replace them with a No-Op instruction) on boot up. During
229	  compile time, a table is made of all the locations that ftrace
230	  can function trace, and this table is linked into the kernel
231	  image. When this is enabled, functions can be individually
232	  enabled, and the functions not enabled will not affect
233	  performance of the system.
234
235	  See the files in /sys/kernel/debug/tracing:
236	    available_filter_functions
237	    set_ftrace_filter
238	    set_ftrace_notrace
239
240	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
241	  otherwise has native performance as long as no tracing is active.
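
	  For example (paths assume the default tracefs mount point), tracing
	  can be limited to a single function such as schedule with:

	      echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
	      echo function > /sys/kernel/debug/tracing/current_tracer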
242
243config DYNAMIC_FTRACE_WITH_REGS
244	def_bool y
245	depends on DYNAMIC_FTRACE
246	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
247
248config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
249	def_bool y
250	depends on DYNAMIC_FTRACE_WITH_REGS
251	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
252
253config DYNAMIC_FTRACE_WITH_ARGS
254	def_bool y
255	depends on DYNAMIC_FTRACE
256	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
257
258config FPROBE
259	bool "Kernel Function Probe (fprobe)"
260	depends on FUNCTION_TRACER
261	depends on DYNAMIC_FTRACE_WITH_REGS
262	depends on HAVE_RETHOOK
263	select RETHOOK
264	default n
265	help
266	  This option enables kernel function probe (fprobe) based on ftrace.
267	  The fprobe is similar to kprobes, but probes only kernel function
268	  entries and exits. It can also probe multiple functions with a
269	  single fprobe.
270
271	  If unsure, say N.
272
273config FUNCTION_PROFILER
274	bool "Kernel function profiler"
275	depends on FUNCTION_TRACER
276	default n
277	help
278	  This option enables the kernel function profiler. A file is created
279	  in debugfs called function_profile_enabled which defaults to zero.
280	  When a 1 is echoed into this file profiling begins, and when a
281	  zero is entered, profiling stops. A "functions" file is created in
282	  the trace_stat directory; this file shows the list of functions that
283	  have been hit and their counters.
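
	  As an illustrative sequence (assuming the usual tracefs mount point;
	  the per-CPU output files are named function0, function1, ...):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function0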
284
285	  If in doubt, say N.
286
287config STACK_TRACER
288	bool "Trace max stack"
289	depends on HAVE_FUNCTION_TRACER
290	select FUNCTION_TRACER
291	select STACKTRACE
292	select KALLSYMS
293	help
294	  This special tracer records the maximum stack footprint of the
295	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
296
297	  This tracer works by hooking into every function call that the
298	  kernel executes, and keeping a maximum stack depth value and
299	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
300	  then it will not have any overhead while the stack tracer
301	  is disabled.
302
303	  To enable the stack tracer on bootup, pass in 'stacktrace'
304	  on the kernel command line.
305
306	  The stack tracer can also be enabled or disabled via the
307	  sysctl kernel.stack_tracer_enabled
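
	  For example (assuming proc and tracefs are mounted in their usual
	  locations):

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace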
308
309	  Say N if unsure.
310
311config TRACE_PREEMPT_TOGGLE
312	bool
313	help
314	  Enables hooks which will be called when preemption is first disabled,
315	  and last enabled.
316
317config IRQSOFF_TRACER
318	bool "Interrupts-off Latency Tracer"
319	default n
320	depends on TRACE_IRQFLAGS_SUPPORT
321	select TRACE_IRQFLAGS
322	select GENERIC_TRACER
323	select TRACER_MAX_TRACE
324	select RING_BUFFER_ALLOW_SWAP
325	select TRACER_SNAPSHOT
326	select TRACER_SNAPSHOT_PER_CPU_SWAP
327	help
328	  This option measures the time spent in irqs-off critical
329	  sections, with microsecond accuracy.
330
331	  The default measurement method is a maximum search, which is
332	  disabled by default and can be runtime (re-)started
333	  via:
334
335	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
336
337	  (Note that kernel size and overhead increase with this option
338	  enabled. This option and the preempt-off timing option can be
339	  used together or separately.)
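
	  As a usage sketch (default tracefs paths assumed), the tracer is
	  selected and the worst-case latency read back with:

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency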
340
341config PREEMPT_TRACER
342	bool "Preemption-off Latency Tracer"
343	default n
344	depends on PREEMPTION
345	select GENERIC_TRACER
346	select TRACER_MAX_TRACE
347	select RING_BUFFER_ALLOW_SWAP
348	select TRACER_SNAPSHOT
349	select TRACER_SNAPSHOT_PER_CPU_SWAP
350	select TRACE_PREEMPT_TOGGLE
351	help
352	  This option measures the time spent in preemption-off critical
353	  sections, with microsecond accuracy.
354
355	  The default measurement method is a maximum search, which is
356	  disabled by default and can be runtime (re-)started
357	  via:
358
359	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
360
361	  (Note that kernel size and overhead increase with this option
362	  enabled. This option and the irqs-off timing option can be
363	  used together or separately.)
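
	  Usage mirrors the irqs-off tracer above; select "preemptoff" (or
	  "preemptirqsoff" when both latency tracers are enabled) as the
	  current tracer, e.g.:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer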
364
365config SCHED_TRACER
366	bool "Scheduling Latency Tracer"
367	select GENERIC_TRACER
368	select CONTEXT_SWITCH_TRACER
369	select TRACER_MAX_TRACE
370	select TRACER_SNAPSHOT
371	help
372	  This tracer tracks the latency of the highest priority task
373	  to be scheduled in, starting from the point it has woken up.
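
	  As a sketch (default tracefs paths assumed), this provides, among
	  others, the "wakeup" and "wakeup_rt" tracers:

	      echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency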
374
375config HWLAT_TRACER
376	bool "Tracer to detect hardware latencies (like SMIs)"
377	select GENERIC_TRACER
378	select TRACER_MAX_TRACE
379	help
380	 This tracer, when enabled, will create one or more kernel threads,
381	 depending on what the cpumask file is set to, with each thread
382	 spinning in a loop looking for interruptions caused by
383	 something other than the kernel. For example, if a
384	 System Management Interrupt (SMI) takes a noticeable amount of
385	 time, this tracer will detect it. This is useful for testing
386	 if a system is reliable for Real Time tasks.
387
388	 Some files are created in the tracing directory when this
389	 is enabled:
390
391	   hwlat_detector/width   - time in usecs for how long to spin for
392	   hwlat_detector/window  - time in usecs between the start of each
393				     iteration
394
395	 A kernel thread is created that will spin with interrupts disabled
396	 for "width" microseconds in every "window" cycle. For the remaining
397	 "window - width" microseconds it does not spin, and the system can
398	 continue to operate.
399
400	 The output will appear in the trace and trace_pipe files.
401
402	 When the tracer is not running, it has no effect on the system,
403	 but when it is running, it can cause the system to be
404	 periodically non-responsive. Do not run this tracer on a
405	 production system.
406
407	 To enable this tracer, echo "hwlat" into the current_tracer
408	 file. Every time a latency is greater than tracing_thresh, it will
409	 be recorded into the ring buffer.
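
	 An illustrative run (default tracefs paths assumed; the width and
	 window values below are arbitrary example values in microseconds):

	     echo hwlat > /sys/kernel/debug/tracing/current_tracer
	     echo 100000 > /sys/kernel/debug/tracing/hwlat_detector/width
	     echo 1000000 > /sys/kernel/debug/tracing/hwlat_detector/window
	     cat /sys/kernel/debug/tracing/trace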
410
411config OSNOISE_TRACER
412	bool "OS Noise tracer"
413	select GENERIC_TRACER
414	select TRACER_MAX_TRACE
415	help
416	  In the context of high-performance computing (HPC), the Operating
417	  System Noise (osnoise) refers to the interference experienced by an
418	  application due to activities inside the operating system. In the
419	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
420	  can cause noise to the system. Moreover, hardware-related jobs can
421	  also cause noise, for example, via SMIs.
422
423	  The osnoise tracer leverages the hwlat_detector by running a similar
424	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
425	  the sources of osnoise to occur during its execution. The tracer takes
426	  note of the entry and exit points of any source of interference,
427	  incrementing a per-cpu interference counter. It keeps an interference
428	  counter for each source of interference. The interference counter for
429	  NMI, IRQs, SoftIRQs, and threads is incremented any time the tool
430	  observes one of these interferences' entry events. When noise happens
431	  without any interference from the operating system level, the
432	  hardware noise counter is incremented, pointing to a hardware-related
433	  noise. In this way, osnoise can account for any source of
434	  interference. At the end of the period, the osnoise tracer prints
435	  the sum of all noise, the max single noise, the percentage of CPU
436	  available for the thread, and the counters for the noise sources.
437
438	  In addition to the tracer, a set of tracepoints were added to
439	  facilitate the identification of the osnoise source.
440
441	  The output will appear in the trace and trace_pipe files.
442
443	  To enable this tracer, echo "osnoise" into the current_tracer
444	  file.
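
	  As a minimal sketch (default tracefs paths assumed):

	      echo osnoise > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace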
445
446config TIMERLAT_TRACER
447	bool "Timerlat tracer"
448	select OSNOISE_TRACER
449	select GENERIC_TRACER
450	help
451	  The timerlat tracer aims to help preemptive kernel developers
452	  find sources of wakeup latencies of real-time threads.
453
454	  The tracer creates a per-cpu kernel thread with real-time priority.
455	  The tracer thread sets a periodic timer to wake itself up, and goes
456	  to sleep waiting for the timer to fire. At the wakeup, the thread
457	  then computes a wakeup latency value as the difference between
458	  the current time and the absolute time that the timer was set
459	  to expire.
460
461	  The tracer prints two lines at every activation. The first is the
462	  timer latency observed at the hardirq context before the
463	  activation of the thread. The second is the timer latency observed
464	  by the thread, which is the same level that cyclictest reports. The
465	  ACTIVATION ID field serves to relate the irq execution to its
466	  respective thread execution.
467
468	  The tracer is built on top of the osnoise tracer, and the osnoise:
469	  events can be used to trace the source of interference from NMI,
470	  IRQs and other threads. It also enables the capture of the
471	  stacktrace at the IRQ context, which helps to identify the code
472	  path that can cause thread delay.
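
	  As a usage sketch (default tracefs paths assumed):

	      echo timerlat > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace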
473
474config MMIOTRACE
475	bool "Memory mapped IO tracing"
476	depends on HAVE_MMIOTRACE_SUPPORT && PCI
477	select GENERIC_TRACER
478	help
479	  Mmiotrace traces Memory Mapped I/O access and is meant for
480	  debugging and reverse engineering. It is called from the ioremap
481	  implementation and works via page faults. Tracing is disabled by
482	  default and can be enabled at run-time.
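
	  As a run-time usage sketch (default tracefs paths assumed):

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      # ... exercise the driver being traced ...
	      echo nop > /sys/kernel/debug/tracing/current_tracer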
483
484	  See Documentation/trace/mmiotrace.rst.
485	  If you are not helping to develop drivers, say N.
486
487config ENABLE_DEFAULT_TRACERS
488	bool "Trace process context switches and events"
489	depends on !GENERIC_TRACER
490	select TRACING
491	help
492	  This tracer hooks into various trace points in the kernel,
493	  allowing the user to pick and choose which trace point they
494	  want to trace. It also includes the sched_switch tracer plugin.
495
496config FTRACE_SYSCALLS
497	bool "Trace syscalls"
498	depends on HAVE_SYSCALL_TRACEPOINTS
499	select GENERIC_TRACER
500	select KALLSYMS
501	help
502	  Basic tracer to catch the syscall entry and exit events.
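
	  For example (default tracefs paths assumed; sys_enter_openat is just
	  one of the per-syscall events this creates):

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
	      cat /sys/kernel/debug/tracing/trace_pipe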
503
504config TRACER_SNAPSHOT
505	bool "Create a snapshot trace buffer"
506	select TRACER_MAX_TRACE
507	help
508	  Allow tracing users to take a snapshot of the current buffer using the
509	  ftrace interface, e.g.:
510
511	      echo 1 > /sys/kernel/debug/tracing/snapshot
512	      cat snapshot
513
514config TRACER_SNAPSHOT_PER_CPU_SWAP
515	bool "Allow snapshot to swap per CPU"
516	depends on TRACER_SNAPSHOT
517	select RING_BUFFER_ALLOW_SWAP
518	help
519	  Allow doing a snapshot of a single CPU buffer instead of a
520	  full swap (all buffers). If this is set, then the following is
521	  allowed:
522
523	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
524
525	  After which, only the tracing buffer for CPU 2 is swapped with
526	  the main tracing buffer, and the other CPU buffers remain the same.
527
528	  When this is enabled, it adds a little more overhead to the
529	  trace recording, as it needs to add some checks to synchronize
530	  recording with swaps. But this does not affect the performance
531	  of the overall system. This is enabled by default when the preempt
532	  or irq latency tracers are enabled, as those need to swap as well
533	  and already add the overhead (plus a lot more).
534
535config TRACE_BRANCH_PROFILING
536	bool
537	select GENERIC_TRACER
538
539choice
540	prompt "Branch Profiling"
541	default BRANCH_PROFILE_NONE
542	help
543	 Branch profiling is a software profiler. It will add hooks
544	 into the C conditionals to test which path a branch takes.
545
546	 The likely/unlikely profiler only looks at the conditions that
547	 are annotated with a likely or unlikely macro.
548
549	 The "all branch" profiler will profile every if-statement in the
550	 kernel. This profiler will also enable the likely/unlikely
551	 profiler.
552
553	 Either of the above profilers adds a bit of overhead to the system.
554	 If unsure, choose "No branch profiling".
555
556config BRANCH_PROFILE_NONE
557	bool "No branch profiling"
558	help
559	  No branch profiling. Branch profiling adds a bit of overhead.
560	  Only enable it if you want to analyse the branching behavior.
561	  Otherwise keep it disabled.
562
563config PROFILE_ANNOTATED_BRANCHES
564	bool "Trace likely/unlikely profiler"
565	select TRACE_BRANCH_PROFILING
566	help
567	  This tracer profiles all likely and unlikely macros
568	  in the kernel. It will display the results in:
569
570	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
571
572	  Note: this will add a significant overhead; only turn this
573	  on if you need to profile the system's use of these macros.
574
575config PROFILE_ALL_BRANCHES
576	bool "Profile all if conditionals" if !FORTIFY_SOURCE
577	select TRACE_BRANCH_PROFILING
578	help
579	  This tracer profiles all branch conditions. Every if ()
580	  evaluated in the kernel is recorded, whether it hit or missed.
581	  The results will be displayed in:
582
583	  /sys/kernel/debug/tracing/trace_stat/branch_all
584
585	  This option also enables the likely/unlikely profiler.
586
587	  This configuration, when enabled, will impose a great overhead
588	  on the system. This should only be enabled when the system
589	  is to be analyzed in much detail.
590endchoice
591
592config TRACING_BRANCHES
593	bool
594	help
595	  Selected by tracers that will trace the likely and unlikely
596	  conditions. This prevents the tracers themselves from being
597	  profiled. Profiling the tracing infrastructure can only happen
598	  when the likelys and unlikelys are not being traced.
599
600config BRANCH_TRACER
601	bool "Trace likely/unlikely instances"
602	depends on TRACE_BRANCH_PROFILING
603	select TRACING_BRANCHES
604	help
605	  This traces the events of likely and unlikely condition
606	  calls in the kernel.  The difference between this and the
607	  "Trace likely/unlikely profiler" is that this is not a
608	  histogram of the callers, but actually places the calling
609	  events into a running trace buffer to see when and where the
610	  events happened, as well as their results.
611
612	  Say N if unsure.
613
614config BLK_DEV_IO_TRACE
615	bool "Support for tracing block IO actions"
616	depends on SYSFS
617	depends on BLOCK
618	select RELAY
619	select DEBUG_FS
620	select TRACEPOINTS
621	select GENERIC_TRACER
622	select STACKTRACE
623	help
624	  Say Y here if you want to be able to trace the block layer actions
625	  on a given queue. Tracing allows you to see any traffic happening
626	  on a block device queue. For more information (and the userspace
627	  support tools needed), fetch the blktrace tools from:
628
629	  git://git.kernel.dk/blktrace.git
630
631	  Tracing also is possible using the ftrace interface, e.g.:
632
633	    echo 1 > /sys/block/sda/sda1/trace/enable
634	    echo blk > /sys/kernel/debug/tracing/current_tracer
635	    cat /sys/kernel/debug/tracing/trace_pipe
636
637	  If unsure, say N.
638
639config KPROBE_EVENTS
640	depends on KPROBES
641	depends on HAVE_REGS_AND_STACK_ACCESS_API
642	bool "Enable kprobes-based dynamic events"
643	select TRACING
644	select PROBE_EVENTS
645	select DYNAMIC_EVENTS
646	default y
647	help
648	  This allows the user to add tracing events (similar to tracepoints)
649	  on the fly via the ftrace interface. See
650	  Documentation/trace/kprobetrace.rst for more details.
651
652	  Those events can be inserted wherever kprobes can probe, and record
653	  various register and memory values.
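
	  As an illustrative sketch (default tracefs paths assumed; "myprobe"
	  is an arbitrary event name and do_sys_open an example probe point):

	      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable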
654
655	  This option is also required by perf-probe subcommand of perf tools.
656	  If you want to use perf tools, this option is strongly recommended.
657
658config KPROBE_EVENTS_ON_NOTRACE
659	bool "Do NOT protect notrace function from kprobe events"
660	depends on KPROBE_EVENTS
661	depends on DYNAMIC_FTRACE
662	default n
663	help
664	  This is only for the developers who want to debug ftrace itself
665	  using kprobe events.
666
667	  If kprobes can use ftrace instead of breakpoint, ftrace related
668	  functions are protected from kprobe-events to prevent an infinite
669	  recursion or any unexpected execution path which leads to a kernel
670	  crash.
671
672	  This option disables such protection and allows you to put kprobe
673	  events on ftrace functions for debugging ftrace by itself.
674	  Note that this might let you shoot yourself in the foot.
675
676	  If unsure, say N.
677
678config UPROBE_EVENTS
679	bool "Enable uprobes-based dynamic events"
680	depends on ARCH_SUPPORTS_UPROBES
681	depends on MMU
682	depends on PERF_EVENTS
683	select UPROBES
684	select PROBE_EVENTS
685	select DYNAMIC_EVENTS
686	select TRACING
687	default y
688	help
689	  This allows the user to add tracing events on top of userspace
690	  dynamic events (similar to tracepoints) on the fly via the trace
691	  events interface. Those events can be inserted wherever uprobes
692	  can probe, and record various registers.
693	  This option is required if you plan to use perf-probe subcommand
694	  of perf tools on user space applications.
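
	  As an illustrative sketch (default tracefs paths assumed; the binary
	  path, the probe offset, and the "myuprobe" name are arbitrary examples):

	      echo 'p:myuprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable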
695
696config BPF_EVENTS
697	depends on BPF_SYSCALL
698	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
699	bool
700	default y
701	help
702	  This allows the user to attach BPF programs to kprobe, uprobe, and
703	  tracepoint events.
704
705config DYNAMIC_EVENTS
706	def_bool n
707
708config PROBE_EVENTS
709	def_bool n
710
711config BPF_KPROBE_OVERRIDE
712	bool "Enable BPF programs to override a kprobed function"
713	depends on BPF_EVENTS
714	depends on FUNCTION_ERROR_INJECTION
715	default n
716	help
717	 Allows BPF to override the execution of a probed function and
718	 set a different return value.  This is used for error injection.
719
720config FTRACE_MCOUNT_RECORD
721	def_bool y
722	depends on DYNAMIC_FTRACE
723	depends on HAVE_FTRACE_MCOUNT_RECORD
724
725config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
726	bool
727	depends on FTRACE_MCOUNT_RECORD
728
729config FTRACE_MCOUNT_USE_CC
730	def_bool y
731	depends on $(cc-option,-mrecord-mcount)
732	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
733	depends on FTRACE_MCOUNT_RECORD
734
735config FTRACE_MCOUNT_USE_OBJTOOL
736	def_bool y
737	depends on HAVE_OBJTOOL_MCOUNT
738	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
739	depends on !FTRACE_MCOUNT_USE_CC
740	depends on FTRACE_MCOUNT_RECORD
741	select OBJTOOL
742
743config FTRACE_MCOUNT_USE_RECORDMCOUNT
744	def_bool y
745	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
746	depends on !FTRACE_MCOUNT_USE_CC
747	depends on !FTRACE_MCOUNT_USE_OBJTOOL
748	depends on FTRACE_MCOUNT_RECORD
749
750config TRACING_MAP
751	bool
752	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
753	help
754	  tracing_map is a special-purpose lock-free map for tracing,
755	  separated out as a stand-alone facility in order to allow it
756	  to be shared between multiple tracers.  It isn't meant to be
757	  generally used outside of that context, and is normally
758	  selected by tracers that use it.
759
760config SYNTH_EVENTS
761	bool "Synthetic trace events"
762	select TRACING
763	select DYNAMIC_EVENTS
764	default n
765	help
766	  Synthetic events are user-defined trace events that can be
767	  used to combine data from other trace events or in fact any
768	  data source.  Synthetic events can be generated indirectly
769	  via the trace() action of histogram triggers or directly
770	  by way of an in-kernel API.
771
772	  See Documentation/trace/events.rst or
773	  Documentation/trace/histogram.rst for details and examples.
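
	  As an illustrative tracefs definition (the event name and fields
	  below are arbitrary examples):

	      echo 'wakeup_latency u64 lat; pid_t pid' >> /sys/kernel/debug/tracing/synthetic_events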
774
775	  If in doubt, say N.
776
777config USER_EVENTS
778	bool "User trace events"
779	select TRACING
780	select DYNAMIC_EVENTS
781	depends on BROKEN || COMPILE_TEST # API needs to be straightened out
782	help
783	  User trace events are user-defined trace events that
784	  can be used like an existing kernel trace event.  User trace
785	  events are generated by writing to a tracefs file.  User
786	  processes can determine if their tracing events should be
787	  generated by memory mapping a tracefs file and checking for
788	  an associated byte being non-zero.
789
790	  If in doubt, say N.
791
792config HIST_TRIGGERS
793	bool "Histogram triggers"
794	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
795	select TRACING_MAP
796	select TRACING
797	select DYNAMIC_EVENTS
798	select SYNTH_EVENTS
799	default n
800	help
801	  Hist triggers allow one or more arbitrary trace event fields
802	  to be aggregated into hash tables and dumped to stdout by
803	  reading a debugfs/tracefs file.  They're useful for
804	  gathering quick and dirty (though precise) summaries of
805	  event activity as an initial guide for further investigation
806	  using more advanced tools.
807
808	  Inter-event tracing of quantities such as latencies is also
809	  supported using hist triggers under this option.
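
	  As an illustrative trigger (default tracefs paths assumed; the
	  kmem:kmalloc event is just an example):

	      echo 'hist:keys=call_site:vals=bytes_req' >> /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
	      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist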
810
811	  See Documentation/trace/histogram.rst.
812	  If in doubt, say N.
813
814config TRACE_EVENT_INJECT
815	bool "Trace event injection"
816	depends on TRACING
817	help
818	  Allow user-space to inject a specific trace event into the ring
819	  buffer. This is mainly used for testing purpose.
820
821	  If unsure, say N.
822
823config TRACEPOINT_BENCHMARK
824	bool "Add tracepoint that benchmarks tracepoints"
825	help
826	 This option creates the tracepoint "benchmark:benchmark_event".
827	 When the tracepoint is enabled, it kicks off a kernel thread that
828	 goes into an infinite loop (calling cond_resched() to let other tasks
829	 run), and calls the tracepoint. Each iteration will record the time
830	 it took to write to the tracepoint, and on the next iteration that
831	 data will be passed to the tracepoint itself. That is, the tracepoint
832	 will report the time it took to do the previous tracepoint write.
833	 The string written to the tracepoint is a static string of 128 bytes
834	 to keep the time the same. The initial string is simply a write of
835	 "START". The second string records the cold cache time of the first
836	 write which is not added to the rest of the calculations.
837
838	 As it is a tight loop, it benchmarks as hot cache. That's fine because
839	 we care most about hot paths that are probably in cache already.
840
841	 An example of the output:
842
843	      START
844	      first=3672 [COLD CACHED]
845	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
846	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
847	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
848	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
849	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
850	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
851
852
853config RING_BUFFER_BENCHMARK
854	tristate "Ring buffer benchmark stress tester"
855	depends on RING_BUFFER
856	help
857	  This option creates a test to stress the ring buffer and benchmark it.
858	  It creates its own ring buffer such that it will not interfere with
859	  any other users of the ring buffer (such as ftrace). It then creates
860	  a producer and consumer that will run for 10 seconds and sleep for
861	  10 seconds. Each interval it will print out the number of events
862	  it recorded and give a rough estimate of how long each iteration took.
863
864	  It does not disable interrupts or raise its priority, so it may be
865	  affected by processes that are running.
866
867	  If unsure, say N.
868
869config TRACE_EVAL_MAP_FILE
870       bool "Show eval mappings for trace events"
871       depends on TRACING
872       help
873	The "print fmt" of the trace events will show the enum/sizeof names
874	instead of their values. This can cause problems for user space tools
875	that use this string to parse the raw data as user space does not know
876	how to convert the string to its value.
877
878	To fix this, there's a special macro in the kernel that can be used
879	to convert an enum/sizeof into its value. If this macro is used, then
880	the print fmt strings will be converted to their values.
881
882	If something does not get converted properly, this option can be
883	used to show what enums/sizeof the kernel tried to convert.
884
885	This option is for debugging the conversions. A file is created
886	in the tracing directory called "eval_map" that will show the
887	names matched with their values and what trace event system they
888	belong to.
889
890	Normally, the mapping of the strings to values will be freed after
891	boot up or module load. With this option, they will not be freed, as
892	they are needed for the "eval_map" file. Enabling this option will
893	increase the memory footprint of the running kernel.
894
895	If unsure, say N.
896
897config FTRACE_RECORD_RECURSION
898	bool "Record functions that recurse in function tracing"
899	depends on FUNCTION_TRACER
900	help
901	  All callbacks that attach to the function tracing have some sort
902	  of protection against recursion. Even though the protection exists,
903	  it adds overhead. This option will create a file in the tracefs
904	  file system called "recursed_functions" that will list the functions
905	  that triggered a recursion.
906
907	  This will add more overhead to cases that have recursion.
908
909	  If unsure, say N
910
911config FTRACE_RECORD_RECURSION_SIZE
912	int "Max number of recursed functions to record"
913	default	128
914	depends on FTRACE_RECORD_RECURSION
915	help
916	  This defines the limit on the number of functions that can be
917	  listed in the "recursed_functions" file, which lists all
918	  the functions that caused a recursion to happen.
919	  This file can be reset, but the limit cannot be changed
920	  at runtime.
921
922config RING_BUFFER_RECORD_RECURSION
923	bool "Record functions that recurse in the ring buffer"
924	depends on FTRACE_RECORD_RECURSION
925	# default y, because it is coupled with FTRACE_RECORD_RECURSION
926	default y
927	help
928	  The ring buffer has its own internal recursion protection. When
929	  recursion happens it won't cause harm because of that protection,
930	  but it does cause unwanted overhead. Enabling this option will
931	  record the places where recursion was detected in the ftrace
932	  "recursed_functions" file.
933
934	  This will add more overhead to cases that have recursion.
935
936config GCOV_PROFILE_FTRACE
937	bool "Enable GCOV profiling on ftrace subsystem"
938	depends on GCOV_KERNEL
939	help
940	  Enable GCOV profiling on ftrace subsystem for checking
941	  which functions/lines are tested.
942
943	  If unsure, say N.
944
945	  Note that on a kernel compiled with this config, ftrace will
946	  run significantly slower.
947
948config FTRACE_SELFTEST
949	bool
950
951config FTRACE_STARTUP_TEST
952	bool "Perform a startup test on ftrace"
953	depends on GENERIC_TRACER
954	select FTRACE_SELFTEST
955	help
956	  This option performs a series of startup tests on ftrace. On bootup
957	  a series of tests are made to verify that the tracer is
958	  functioning properly. It will do tests on all the configured
959	  tracers of ftrace.
960
961config EVENT_TRACE_STARTUP_TEST
962	bool "Run selftest on trace events"
963	depends on FTRACE_STARTUP_TEST
964	default y
965	help
966	  This option performs a test on all trace events in the system.
967	  It basically just enables each event and runs some code that
968	  will trigger events (not necessarily the event it enables).
969	  This may take some time to run as there are a lot of events.
970
971config EVENT_TRACE_TEST_SYSCALLS
972	bool "Run selftest on syscall events"
973	depends on EVENT_TRACE_STARTUP_TEST
974	help
975	 This option will also enable testing every syscall event.
976	 It enables each event, runs various loads with the event enabled,
977	 and then disables it. This adds a bit more time to kernel boot
978	 up since it does this for every system call defined.
979
980	 TBD - enable a way to actually call the syscalls as we test their
981	       events
982
983config FTRACE_SORT_STARTUP_TEST
984       bool "Verify compile time sorting of ftrace functions"
985       depends on DYNAMIC_FTRACE
986       depends on BUILDTIME_MCOUNT_SORT
987       help
988	 Sorting of the mcount_loc section, which ftrace uses to find
989	 where to patch functions for tracing and other callbacks, is
990	 done at compile time. But if the sort is not done correctly,
991	 it will cause non-deterministic failures. When this is set, the
992	 sorted sections will be verified to be indeed sorted, and a
993	 warning will be issued if they are not.
994
995	 If unsure, say N
996
997config RING_BUFFER_STARTUP_TEST
998       bool "Ring buffer startup self test"
999       depends on RING_BUFFER
1000       help
1001	 Run a simple self test on the ring buffer on boot up. Late in the
1002	 kernel boot sequence, the test starts and kicks off
1003	 a thread per cpu. Each thread will write various size events
1004	 into the ring buffer. Another thread is created to send IPIs
1005	 to each of the threads, where the IPI handler will also write
1006	 to the ring buffer, to test/stress the nesting ability.
1007	 If any anomalies are discovered, a warning will be displayed
1008	 and all ring buffers will be disabled.
1009
1010	 The test runs for 10 seconds. This will slow your boot time
1011	 by at least 10 more seconds.
1012
1013	 At the end of the test, statistics and more checks are done.
1014	 It will output the stats of each per cpu buffer. What
1015	 was written, the sizes, what was read, what was lost, and
1016	 other similar details.
1017
1018	 If unsure, say N
1019
1020config RING_BUFFER_VALIDATE_TIME_DELTAS
1021	bool "Verify ring buffer time stamp deltas"
1022	depends on RING_BUFFER
1023	help
1024	  This will audit the time stamps on the ring buffer sub
1025	  buffer to make sure that all the time deltas for the
1026	  events on a sub buffer match the current time stamp.
1027	  This audit is performed for every event that is not
1028	  interrupted, or interrupting another event. A check
1029	  is also made when traversing sub buffers to make sure
1030	  that all the deltas on the previous sub buffer do not
1031	  add up to be greater than the current time stamp.
1032
1033	  NOTE: This adds significant overhead to recording of events,
1034	  and should only be used to test the logic of the ring buffer.
1035	  Do not use it on production systems.
1036
1037	  Only say Y if you understand what this does, and you
1038	  still want it enabled. Otherwise say N
1039
1040config MMIOTRACE_TEST
1041	tristate "Test module for mmiotrace"
1042	depends on MMIOTRACE && m
1043	help
1044	  This is a dumb module for testing mmiotrace. It is very dangerous
1045	  as it will write garbage to IO memory starting at a given address.
1046	  However, it should be safe to use on e.g. unused portion of VRAM.
1047
1048	  Say N, unless you absolutely know what you are doing.
1049
1050config PREEMPTIRQ_DELAY_TEST
1051	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
1052	depends on m
1053	help
1054	  Select this option to build a test module that can help test latency
1055	  tracers by executing a preempt or irq disable section with a user
1056	  configurable delay. The module busy waits for the duration of the
1057	  critical section.
1058
1059	  For example, the following invocation generates a burst of three
1060	  irq-disabled critical sections for 500us:
1061	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
1062
1063	  Additionally, if you want to pin the test to the cpu on which the
1064	  latency tracer is running, specify cpu_affinity=cpu_num at the end
1065	  of the command.
1066
1067	  If unsure, say N
1068
1069config SYNTH_EVENT_GEN_TEST
1070	tristate "Test module for in-kernel synthetic event generation"
1071	depends on SYNTH_EVENTS
1072	help
1073	  This option creates a test module to check the base
1074	  functionality of in-kernel synthetic event definition and
1075	  generation.
1076
1077	  To test, insert the module, and then check the trace buffer
1078	  for the generated sample events.
1079
1080	  If unsure, say N.
1081
1082config KPROBE_EVENT_GEN_TEST
1083	tristate "Test module for in-kernel kprobe event generation"
1084	depends on KPROBE_EVENTS
1085	help
1086	  This option creates a test module to check the base
1087	  functionality of in-kernel kprobe event definition.
1088
1089	  To test, insert the module, and then check the trace buffer
1090	  for the generated kprobe events.
1091
1092	  If unsure, say N.
1093
1094config HIST_TRIGGERS_DEBUG
1095	bool "Hist trigger debug support"
1096	depends on HIST_TRIGGERS
1097	help
1098          Add "hist_debug" file for each event, which when read will
1099          dump out a bunch of internal details about the hist triggers
1100          defined on that event.
1101
1102          The hist_debug file serves a couple of purposes:
1103
1104            - Helps developers verify that nothing is broken.
1105
1106            - Provides educational information to support the details
1107              of the hist trigger internals as described by
1108              Documentation/trace/histogram-design.rst.
1109
1110          The hist_debug output only covers the data structures
1111          related to the histogram definitions themselves and doesn't
1112          display the internals of map buckets or variable values of
1113          running histograms.
1114
1115          If unsure, say N.
1116
1117source "kernel/trace/rv/Kconfig"
1118
1119endif # FTRACE
1120