#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool

config HAVE_FUNCTION_TRACER
	bool

config HAVE_FUNCTION_GRAPH_TRACER
	bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	 This gets selected when the arch tests the function_trace_stop
	 variable at the mcount call site. Otherwise, this variable
	 is tested by the called function.

config HAVE_DYNAMIC_FTRACE
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool

config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_FTRACE_SYSCALLS
	bool

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	select MARKERS
	bool

# All tracer options should select GENERIC_TRACER. The options that are
# enabled by all tracers (context switch and event tracer) select TRACING
# instead. This allows those options to appear when no other tracer is
# selected, but to stay hidden when something else selects them. We need the
# two options GENERIC_TRACER and TRACING to accomplish this hiding of the
# automatic options without creating circular dependencies.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway; they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; better to implement
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	 Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.

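	  As a rough usage sketch (not a guarantee of the exact interface
	  on your kernel; it assumes debugfs is mounted at
	  /sys/kernel/debug), the function tracer can typically be selected
	  at run time with:

	      # select the tracer, read the trace, then switch back to "nop"
	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
	      echo nop > /sys/kernel/debug/tracing/current_tracer
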
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its main purpose is to trace the duration of functions and
	  draw a call graph for each thread, with some information such as
	  the return value. This is done by pushing the current return
	  address of the traced function onto a stack of calls in the
	  current task structure.

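	  For illustration (tracer and file names assumed from the standard
	  ftrace interface, with debugfs mounted at /sys/kernel/debug):

	      # switch to the graph tracer and watch the call graph live
	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe
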
config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be restarted at run time via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

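	  As a usage sketch (assuming debugfs is mounted at
	  /sys/kernel/debug; the tracer name is part of the standard
	  ftrace interface):

	      # record the worst irqs-off latency seen so far
	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency
	      cat /sys/kernel/debug/tracing/trace
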
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be restarted at run time via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

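	  As a usage sketch (tracer names assumed from the standard ftrace
	  interface; when the irqs-off tracer is also enabled, a combined
	  "preemptirqsoff" tracer is typically available as well):

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency
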
config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

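	  For illustration (the tracer is exposed under the name "wakeup"
	  in the standard ftrace interface; paths assume debugfs at
	  /sys/kernel/debug):

	      echo wakeup > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency
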
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

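	  As a rough sketch of that interface (event and file names are
	  illustrative; they assume the event tracing layout under
	  /sys/kernel/debug/tracing):

	      # list events, enable one, then read the trace
	      cat /sys/kernel/debug/tracing/available_events
	      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	      cat /sys/kernel/debug/tracing/trace
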
config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_FTRACE_SYSCALLS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
	bool "Trace boot initcalls"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context-switches.

	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  You must pass ftrace=initcall on the kernel command line
	  to enable this on bootup.

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if statement in the
	 kernel. This profiler also enables the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	 No branch profiling. Branch profiling adds a bit of overhead.
	 Only enable it if you want to analyse the branching behavior.
	 Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /debugfs/tracing/profile_annotated_branch

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

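	  As a reading sketch (the file is the one named above; the exact
	  column layout may vary by kernel version, but it reports correct
	  vs. incorrect prediction counts per annotated branch):

	      cat /debugfs/tracing/profile_annotated_branch
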
config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it hit or missed.
	  The results will be displayed in:

	  /debugfs/tracing/profile_branch

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed.

endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely/unlikely branches are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

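	  As an illustration (the tracer is assumed to be registered under
	  the name "branch"; adjust if your kernel differs):

	      echo branch > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
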
config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select GENERIC_TRACER
	help
	  This tracer helps developers to analyze and optimize the kernel's
	  power management decisions, specifically the C-state and P-state
	  behavior.

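	  As an illustration (the tracer name "power" is an assumption
	  based on the usual ftrace naming; the resulting trace is meant
	  to be post-processed by userspace tools):

	      echo power > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace
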
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in debugfs/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

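	  For example (paths assume procfs and debugfs in their usual
	  locations):

	      # flip the sysctl mentioned above, then read the result
	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_trace
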
config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select GENERIC_TRACER
	help
	  This tracer records all branches on the system in a circular
	  buffer, giving access to the last N branches for each CPU.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select GENERIC_TRACER
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/trace/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select GENERIC_TRACER
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread, such as the number of works
	  inserted and executed since its creation. It can help to
	  evaluate the amount of work each of them has to perform.
	  For example, it can help a developer decide whether to use
	  a per-cpu workqueue instead of a singlethreaded one.

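	  As a sketch of where to look (the "workqueues" stat file name is
	  an assumption; it may differ between kernel versions):

	      cat /sys/kernel/debug/tracing/trace_stat/workqueues
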
config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	 This option will modify all the calls to ftrace dynamically
	 (will patch them out of the binary image and replace them
	 with a No-Op instruction) as they are called. A table is
	 created to dynamically enable them again.

	 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	 otherwise has native performance as long as no tracing is active.

	 The changes to the code are done by a kernel thread that
	 wakes up once a second and checks to see if any ftrace calls
	 were made. If so, it runs stop_machine (stops all CPUs)
	 and modifies the code to jump over the call to ftrace.

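	 With dynamic ftrace the set of traced functions can also be
	 restricted at run time. A rough example (the filter file is part
	 of the standard ftrace interface; the glob pattern is illustrative):

	     # trace only scheduler-related functions
	     echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
	     echo function > /sys/kernel/debug/tracing/current_tracer
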
config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	 This option enables the kernel function profiler. A file is created
	 in debugfs called function_profile_enabled which defaults to zero.
	 When a 1 is echoed into this file profiling begins, and when a
	 zero is entered, profiling stops. A "functions" file in the
	 trace_stat directory then shows the list of functions that have
	 been hit and their counters.

	 If in doubt, say N.

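	 For example (file locations follow the description above; on SMP
	 kernels the per-CPU stat files may be split, e.g. function0,
	 function1, ...):

	     echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	     cat /sys/kernel/debug/tracing/trace_stat/function*
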
config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, the tests verify that the tracers are functioning
	  properly. Tests are run on all the configured tracers of ftrace.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

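	  As a minimal run-time sketch (see the documentation above for the
	  full workflow; paths assume debugfs at /sys/kernel/debug):

	      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
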
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

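	  When built as a module, a rough way to run it (the module name is
	  assumed to follow the source file and may differ):

	      modprobe ring_buffer_benchmark
	      dmesg | tail
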
endif # FTRACE

endif # TRACING_SUPPORT