xref: /openbmc/linux/kernel/trace/Kconfig (revision b04b4f78)
#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

# Hidden (non-prompt) options: architectures select the HAVE_* symbols
# for the tracing features they implement; the tracers below depend on them.
config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool

config HAVE_FUNCTION_TRACER
	bool

config HAVE_FUNCTION_GRAPH_TRACER
	bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  This gets selected when the arch tests the function_trace_stop
	  variable at the mcount call site. Otherwise, this variable
	  is tested by the called function.

config HAVE_DYNAMIC_FTRACE
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool

config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_FTRACE_SYSCALLS
	bool

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

# Core tracing infrastructure; selected by the individual tracers below.
config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, they were tested to build and work. Note that new
	# exceptions to this list aren't welcome, better implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menu "Tracers"

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function, which NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select TRACING
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increases with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select TRACING
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in preemption off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increases with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select TRACING
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config CONTEXT_SWITCH_TRACER
	bool "Trace process context switches"
	select TRACING
	select MARKERS
	help
	  This tracer gets called from the context switch and records
	  all switching of tasks.

config EVENT_TRACER
	bool "Trace various events in the kernel"
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel
	  allowing the user to pick and choose which trace point they
	  want to trace.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_FTRACE_SYSCALLS
	select TRACING
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
	bool "Trace boot initcalls"
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context-switches.

	  Its aim is to be parsed by the /scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  You must pass in ftrace=initcall to the kernel command line
	  to enable this on bootup.

config TRACE_BRANCH_PROFILING
	bool "Trace likely/unlikely profiler"
	select TRACING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /debugfs/tracing/profile_annotated_branch

	  Note: this will add a significant overhead, only turn this
	  on if you need to profile the system's use of these macros.

	  Say N if unsure.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	depends on TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
	  The results will be displayed in:

	  /debugfs/tracing/profile_branch

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed.

	  Say N if unsure.

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select TRACING
	help
	  This tracer helps developers to analyze and optimize the kernel's
	  power management decisions, specifically the C-state and P-state
	  behavior.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in debugfs/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled

	  Say N if unsure.

config HW_BRANCH_TRACER
	bool "Trace hw branches"
	depends on HAVE_HW_BRANCH_TRACER
	select TRACING
	help
	  This tracer records all branches on the system in a circular
	  buffer giving access to the last N branches for each cpu.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select TRACING
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/trace/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select TRACING
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread such as the number of the
	  works inserted and executed since their creation. It can help
	  to evaluate the amount of work each of them has to perform.
	  For example it can help a developer to decide whether to
	  choose a per cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select TRACING
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (stops all CPUs)
	  and modifies the code to jump over the call to ftrace.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

# Hidden option selected by FTRACE_STARTUP_TEST below.
config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on TRACING
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select TRACING
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

endmenu

endif # TRACING_SUPPORT
