#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.rst

config HAVE_FENTRY
	bool
	help
	  Arch supports the gcc options -pg with -mfentry

config HAVE_NOP_MCOUNT
	bool
	help
	  Arch supports the gcc options -pg with -mrecord-mcount and -mnop-mcount

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	select GLOB
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

config PREEMPTIRQ_TRACEPOINTS
	bool
	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
	select TRACING
	default y
	help
	  Create preempt/irq toggle tracepoints if needed, so that other parts
	  of the kernel can use them to generate or add hooks to them.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select GLOB
	select TASKS_RCU if PREEMPT
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

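# Illustrative usage for FUNCTION_TRACER (assumes debugfs is mounted at
# /sys/kernel/debug; paths may differ if tracefs is mounted elsewhere):
#
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
#	echo nop > /sys/kernel/debug/tracing/current_tracer
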
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by saving the current return
	  address in a stack of calls on the current task structure.

config TRACE_PREEMPT_TOGGLE
	bool
	help
	  Enables hooks which will be called when preemption is first disabled,
	  and last enabled.

config PREEMPTIRQ_EVENTS
	bool "Enable trace events for preempt and irq disable/enable"
	select TRACE_IRQFLAGS
	select TRACE_PREEMPT_TOGGLE if PREEMPT
	select GENERIC_TRACER
	default n
	help
	  Enable tracing of disable and enable events for preemption and irqs.

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

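# Illustrative usage for IRQSOFF_TRACER (assumes debugfs is mounted at
# /sys/kernel/debug): select the tracer, reset the maximum, then read the
# largest irqs-off latency seen so far:
#
#	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
#	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
#	cat /sys/kernel/debug/tracing/tracing_max_latency
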
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	select TRACE_PREEMPT_TOGGLE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	 When enabled, this tracer creates one or more kernel threads,
	 depending on what the cpumask file is set to, with each thread
	 spinning in a loop looking for interruptions caused by
	 something other than the kernel. For example, if a
	 System Management Interrupt (SMI) takes a noticeable amount of
	 time, this tracer will detect it. This is useful for testing
	 if a system is reliable for Real Time tasks.

	 Some files are created in the tracing directory when this
	 is enabled:

	   hwlat_detector/width   - time in usecs for how long to spin for
	   hwlat_detector/window  - time in usecs between the start of each
				     iteration

	 A kernel thread is created that will spin with interrupts disabled
	 for "width" microseconds in every "window" cycle. It will not spin
	 for "window - width" microseconds, where the system can
	 continue to operate.

	 The output will appear in the trace and trace_pipe files.

	 When the tracer is not running, it has no effect on the system,
	 but when it is running, it can cause the system to be
	 periodically non-responsive. Do not run this tracer on a
	 production system.

	 To enable this tracer, echo "hwlat" into the current_tracer
	 file. Every time a latency is greater than tracing_thresh, it will
	 be recorded into the ring buffer.

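# Illustrative usage for HWLAT_TRACER (assumes debugfs is mounted at
# /sys/kernel/debug; the width/window values below are only examples):
#
#	echo hwlat > /sys/kernel/debug/tracing/current_tracer
#	echo 500000 > /sys/kernel/debug/tracing/hwlat_detector/width
#	echo 1000000 > /sys/kernel/debug/tracing/hwlat_detector/window
#	cat /sys/kernel/debug/tracing/trace_pipe
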
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace point they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

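# Illustrative usage for FTRACE_SYSCALLS (assumes debugfs is mounted at
# /sys/kernel/debug; openat is just one example of an available syscall event):
#
#	echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
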
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using the
	  ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After which, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if-statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or missed.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

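# Illustrative usage for STACK_TRACER (assumes debugfs is mounted at
# /sys/kernel/debug and procfs at /proc):
#
#	echo 1 > /proc/sys/kernel/stack_tracer_enabled
#	cat /sys/kernel/debug/tracing/stack_trace
#	echo 0 > /proc/sys/kernel/stack_tracer_enabled
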
config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENTS
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.rst for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.

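# Illustrative usage for KPROBE_EVENTS (assumes debugfs is mounted at
# /sys/kernel/debug; do_sys_open is just an example of a probeable symbol,
# and "my_open" is an arbitrary event name):
#
#	echo 'p:my_open do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
#	echo 1 > /sys/kernel/debug/tracing/events/kprobes/my_open/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
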
config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of a breakpoint, ftrace-related
	  functions are protected from kprobe events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.

config UPROBE_EVENTS
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select DYNAMIC_EVENTS
	select TRACING
	default y
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config DYNAMIC_EVENTS
	def_bool n

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. During
	  compile time, a table is made of all the locations that ftrace
	  can function trace, and this table is linked into the kernel
	  image. When this is enabled, functions can be individually
	  enabled, and the functions not enabled will not affect
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

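# Illustrative usage for DYNAMIC_FTRACE filtering (assumes debugfs is mounted
# at /sys/kernel/debug; the 'sched*' glob is just an example):
#
#	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
#	cat /sys/kernel/debug/tracing/set_ftrace_filter
#	echo function > /sys/kernel/debug/tracing/current_tracer
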
config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

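# Illustrative usage for FUNCTION_PROFILER (assumes debugfs is mounted at
# /sys/kernel/debug; function0 is the per-cpu stat file for CPU 0):
#
#	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
#	cat /sys/kernel/debug/tracing/trace_stat/function0
#	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
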
config BPF_KPROBE_OVERRIDE
	bool "Enable BPF programs to override a kprobed function"
	depends on BPF_EVENTS
	depends on FUNCTION_ERROR_INJECTION
	default n
	help
	 Allows BPF to override the execution of a probed function and
	 set a different return value.  This is used for error injection.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_STARTUP_TEST
	bool "Run selftest on trace events"
	depends on FTRACE_STARTUP_TEST
	default y
	help
	  This option performs a test on all trace events in the system.
	  It basically just enables each event and runs some code that
	  will trigger events (not necessarily the event it enables).
	  This may take some time to run, as there are a lot of events.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on EVENT_TRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It only enables the event, runs various loads with the event
	 enabled, and then disables it. This adds a bit more time for
	 kernel boot up since it runs this on every system call defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.rst.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	select DYNAMIC_EVENTS
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.

	  Inter-event tracing of quantities such as latencies is also
	  supported using hist triggers under this option.

	  See Documentation/trace/histogram.rst.
	  If in doubt, say N.

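# Illustrative usage for HIST_TRIGGERS (assumes debugfs is mounted at
# /sys/kernel/debug; kmem:kmalloc is one example of an event with suitable
# key/value fields):
#
#	echo 'hist:key=call_site:val=bytes_req' > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
#	cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
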
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	 This option creates the tracepoint "benchmark:benchmark_event".
	 When the tracepoint is enabled, it kicks off a kernel thread that
	 goes into an infinite loop (calling cond_resched() to let other tasks
	 run), and calls the tracepoint. Each iteration will record the time
	 it took to write to the tracepoint and on the next iteration that
	 data will be passed to the tracepoint itself. That is, the tracepoint
	 will report the time it took to do the previous tracepoint.
	 The string written to the tracepoint is a static string of 128 bytes
	 to keep the time the same. The initial string is simply a write of
	 "START". The second string records the cold cache time of the first
	 write which is not added to the rest of the calculations.

	 As it is a tight loop, it benchmarks as hot cache. That's fine because
	 we care most about hot paths that are probably in cache already.

	 An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666

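# Illustrative usage for TRACEPOINT_BENCHMARK (assumes debugfs is mounted at
# /sys/kernel/debug):
#
#	echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable
#	cat /sys/kernel/debug/tracing/trace_pipe
#	echo 0 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable
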
config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test starts and kicks off
	  a thread per cpu. Each thread will write various size events
	  into the ring buffer. Another thread is created to send IPIs
	  to each of the threads, where the IPI handler will also write
	  to the ring buffer, to test/stress the nesting ability.
	  If any anomalies are discovered, a warning will be displayed
	  and all ring buffers will be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per cpu buffer: what
	  was written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config PREEMPTIRQ_DELAY_TEST
	tristate "Preempt / IRQ disable delay thread to test latency tracers"
	depends on m
	help
	  Select this option to build a test module that can help test latency
	  tracers by executing a preempt or irq disable section with a user
	  configurable delay. The module busy waits for the duration of the
	  critical section.

	  For example, the following invocation forces a one-time irq-disabled
	  critical section for 500us:

	      modprobe preemptirq_delay_test test_mode=irq delay=500000

	  If unsure, say N.

config TRACE_EVAL_MAP_FILE
	bool "Show eval mappings for trace events"
	depends on TRACING
	help
	The "print fmt" of the trace events will show the enum/sizeof names
	instead of their values. This can cause problems for user space tools
	that use this string to parse the raw data as user space does not know
	how to convert the string to its value.

	To fix this, there's a special macro in the kernel that can be used
	to convert an enum/sizeof into its value. If this macro is used, then
	the print fmt strings will be converted to their values.

	If something does not get converted properly, this option can be
	used to show what enums/sizeof the kernel tried to convert.

	This option is for debugging the conversions. A file is created
	in the tracing directory called "eval_map" that will show the
	names matched with their values and what trace event system they
	belong to.

	Normally, the mapping of the strings to values will be freed after
	boot up or module load. With this option, they will not be freed, as
	they are needed for the "eval_map" file. Enabling this option will
	increase the memory footprint of the running kernel.

	If unsure, say N.

config GCOV_PROFILE_FTRACE
	bool "Enable GCOV profiling on ftrace subsystem"
	depends on GCOV_KERNEL
	help
	  Enable GCOV profiling on the ftrace subsystem for checking
	  which functions/lines are tested.

	  If unsure, say N.

	  Note that on a kernel compiled with this config, ftrace will
	  run significantly slower.

endif # FTRACE

endif # TRACING_SUPPORT