#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_C_MCOUNT_RECORD
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (the context switch and event tracers) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# they do not appear as separate entries when something else selects them.
# The two options GENERIC_TRACER and TRACING are needed to accomplish this
# hiding of the automatic options without creating circular dependencies.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, as they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; better to implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.
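
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  function tracer can be enabled at runtime with:

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace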

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry and
	  its return.
	  Its first purpose is to measure the duration of functions and
	  to draw a call graph for each thread, with some information such
	  as the return value. This is done by saving the return address of
	  each traced call in a stack of calls on the current task structure.
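
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace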


config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
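
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  tracer itself is selected with:

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency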

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
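
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency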

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
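
	  For example, assuming debugfs is mounted at /sys/kernel/debug, the
	  wakeup latency tracer built by this option can be selected with:

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency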

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.
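
	  For example, assuming debugfs is mounted at /sys/kernel/debug,
	  individual events can be enabled with something like:

	      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	      cat /sys/kernel/debug/tracing/trace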

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
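
	  For example, assuming debugfs is mounted at /sys/kernel/debug, all
	  syscall events can be enabled with:

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable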

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyze the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/profile_annotated_branch

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  executed in the kernel is recorded, whether the branch was
	  taken or not. The results will be displayed in:

	  /sys/kernel/debug/tracing/profile_branch

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a significant
	  overhead on the system. It should only be enabled when the
	  system is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.
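
	  For example, assuming debugfs is mounted at /sys/kernel/debug, this
	  tracer can be selected with:

	      echo branch > /sys/kernel/debug/tracing/current_tracer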

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
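
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace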

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.
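
	  For example, assuming debugfs is mounted at /sys/kernel/debug, a
	  probe event on do_sys_open could be added and enabled with
	  something like (see kprobetrace.txt for the full syntax):

	      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable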

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (it will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (stops all CPUs)
	  and modifies the code to jump over the call to ftrace.
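
	  With this option, tracing can also be limited to selected
	  functions at runtime, for example (assuming debugfs is mounted
	  at /sys/kernel/debug):

	      echo 'schedule*' > /sys/kernel/debug/tracing/set_ftrace_filter
	      echo function > /sys/kernel/debug/tracing/current_tracer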

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.
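
	  For example, assuming debugfs is mounted at /sys/kernel/debug:

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function*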

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup,
	  a series of tests is run to verify that the tracer is functioning
	  properly. Tests are run on all the configured tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option also enables testing of every syscall event. It only
	  enables each event, runs various loads with the event enabled,
	  and then disables it again. This adds a bit more time to kernel
	  bootup, since it does this for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
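
	  For example, assuming debugfs is mounted at /sys/kernel/debug, it
	  can be started with:

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer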

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous,
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.
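
	  When built as a module it can be started simply by loading it,
	  for example (assuming the module is named ring_buffer_benchmark):

	      modprobe ring_buffer_benchmark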

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT