#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#
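# As a rough illustration (the architecture name and option list below are
# made up for this example, not taken from this file), an architecture
# typically advertises its support from its own Kconfig like:
#
#	config EXAMPLE_ARCH
#		def_bool y
#		select HAVE_FUNCTION_TRACER
#		select HAVE_FUNCTION_GRAPH_TRACER
#		select HAVE_DYNAMIC_FTRACE
#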

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE_WITH_REGS
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FENTRY
	bool
	help
	  The architecture supports the gcc option -pg together with -mfentry.

config HAVE_C_RECORDMCOUNT
	bool
	help
	  The C version of recordmcount is available.

config TRACER_MAX_TRACE
	bool

config TRACE_CLOCK
	bool

config RING_BUFFER
	bool
	select TRACE_CLOCK
	select IRQ_WORK

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (context switch and event tracer) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# to stay hidden when something else selects them. The two separate options
# GENERIC_TRACER and TRACING are needed to hide the automatic options
# without introducing circular dependencies.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, as they were tested to build and work. Note that new
	# exceptions to this list are not welcome; implement irqflags tracing
	# for your architecture instead.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function. This NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  runtime (the boot-up default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.
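
	  As a quick sketch of typical usage (assuming debugfs is mounted at
	  /sys/kernel/debug; adjust the path for your setup):

	      echo function > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace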

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its primary purpose is to measure the duration of functions and
	  to draw a call graph for each thread, with some extra information
	  such as the return value. This is done by saving the function's
	  return address in a stack of calls kept in the current task
	  structure.
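
	  A minimal sketch of typical usage, limiting the graph to one
	  function (the function name is illustrative, paths assumed):

	      echo kmem_cache_alloc > /sys/kernel/debug/tracing/set_graph_function
	      echo function_graph > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/trace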


config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
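
	  A typical invocation might look like this (tracefs path assumed):

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
	      # ... run the workload of interest ...
	      cat /sys/kernel/debug/tracing/tracing_max_latency
	      cat /sys/kernel/debug/tracing/trace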

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	select TRACER_SNAPSHOT
	select TRACER_SNAPSHOT_PER_CPU_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
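
	  Usage mirrors the irqs-off tracer above, with "preemptoff" as the
	  tracer name; when both this option and IRQSOFF_TRACER are enabled,
	  a combined "preemptirqsoff" tracer should also be available:

	      echo preemptoff > /sys/kernel/debug/tracing/current_tracer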

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	select TRACER_SNAPSHOT
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
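
	  As a quick sketch of typical usage (the "wakeup" tracer tracks the
	  highest priority task overall, "wakeup_rt" only real-time tasks;
	  paths assumed):

	      echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency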

config HWLAT_TRACER
	bool "Tracer to detect hardware latencies (like SMIs)"
	select GENERIC_TRACER
	help
	  This tracer, when enabled, will create one or more kernel threads,
	  depending on what the cpumask file is set to; each thread spins
	  in a loop looking for interruptions caused by something other
	  than the kernel. For example, if a System Management Interrupt
	  (SMI) takes a noticeable amount of time, this tracer will detect
	  it. This is useful for testing whether a system is reliable for
	  Real Time tasks.

	  Some files are created in the tracing directory when this
	  is enabled:

	    hwlat_detector/width   - time in usecs for how long to spin for
	    hwlat_detector/window  - time in usecs between the start of each
	                              iteration

	  A kernel thread is created that will spin with interrupts disabled
	  for "width" microseconds in every "window" cycle. It will not spin
	  for the remaining "window - width" microseconds, during which the
	  system can continue to operate.

	  The output will appear in the trace and trace_pipe files.

	  When the tracer is not running, it has no effect on the system,
	  but when it is running, it can cause the system to become
	  periodically unresponsive. Do not run this tracer on a
	  production system.

	  To enable this tracer, echo "hwlat" into the current_tracer
	  file. Every time a latency is greater than tracing_thresh, it will
	  be recorded into the ring buffer.
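
	  A typical setup might look like this (the values are illustrative,
	  units are microseconds, paths assumed):

	      echo hwlat > /sys/kernel/debug/tracing/current_tracer
	      echo 1000000 > /sys/kernel/debug/tracing/hwlat_detector/window
	      echo 500000 > /sys/kernel/debug/tracing/hwlat_detector/width
	      echo 10 > /sys/kernel/debug/tracing/tracing_thresh
	      cat /sys/kernel/debug/tracing/trace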

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
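
	  The resulting syscall events show up alongside the other trace
	  events and can be enabled the same way (paths assumed):

	      echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	      cat /sys/kernel/debug/tracing/trace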

config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take a snapshot of the current buffer using
	  the ftrace interface, e.g.:

	      echo 1 > /sys/kernel/debug/tracing/snapshot
	      cat snapshot

config TRACER_SNAPSHOT_PER_CPU_SWAP
	bool "Allow snapshot to swap per CPU"
	depends on TRACER_SNAPSHOT
	select RING_BUFFER_ALLOW_SWAP
	help
	  Allow doing a snapshot of a single CPU buffer instead of a
	  full swap (all buffers). If this is set, then the following is
	  allowed:

	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot

	  After this, only the tracing buffer for CPU 2 is swapped with
	  the main tracing buffer, and the other CPU buffers remain the same.

	  When this is enabled, it adds a little more overhead to the
	  trace recording, as it needs to add some checks to synchronize
	  recording with swaps. But this does not affect the performance
	  of the overall system. This is enabled by default when the preempt
	  or irq latency tracers are enabled, as those need to swap as well
	  and already add the overhead (plus a lot more).

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyze the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.
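
	  A rough usage sketch (path assumed; the output lists each
	  annotated branch together with counts of correct and incorrect
	  predictions):

	      cat /sys/kernel/debug/tracing/trace_stat/branch_annotated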

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  encountered in the kernel is recorded, whether the branch
	  was taken or missed. The results will be displayed in:

	  /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose significant overhead
	  on the system. It should only be enabled when the system
	  is to be analyzed in detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely() and unlikely() calls are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
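
	  For example (tracefs path assumed):

	      echo 1 > /proc/sys/kernel/stack_tracer_enabled
	      cat /sys/kernel/debug/tracing/stack_max_size
	      cat /sys/kernel/debug/tracing/stack_trace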

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	select PROBE_EVENTS
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.
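
	  As a rough sketch (the probe name, target function and fetch
	  argument are illustrative, and register names are architecture
	  dependent; see kprobetrace.txt for the exact syntax):

	      echo 'p:myprobe do_sys_open dfd=%di' >> /sys/kernel/debug/tracing/kprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
	      cat /sys/kernel/debug/tracing/trace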

config UPROBE_EVENT
	bool "Enable uprobes-based dynamic events"
	depends on ARCH_SUPPORTS_UPROBES
	depends on MMU
	depends on PERF_EVENTS
	select UPROBES
	select PROBE_EVENTS
	select TRACING
	default n
	help
	  This allows the user to add tracing events on top of userspace
	  dynamic events (similar to tracepoints) on the fly via the trace
	  events interface. Those events can be inserted wherever uprobes
	  can probe, and record various registers.
	  This option is required if you plan to use the perf-probe
	  subcommand of perf tools on user space applications.
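
	  A rough sketch of the trace interface (the binary path and offset
	  below are hypothetical placeholders; in practice the offset of the
	  target symbol has to be looked up first):

	      echo 'p:my_uprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
	      echo 1 > /sys/kernel/debug/tracing/events/uprobes/my_uprobe/enable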

config BPF_EVENTS
	depends on BPF_SYSCALL
	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
	bool
	default y
	help
	  This allows the user to attach BPF programs to kprobe events.

config PROBE_EVENTS
	def_bool n

config DYNAMIC_FTRACE
	bool "enable/disable function tracing dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to function tracing
	  dynamically (it will patch them out of the binary image and
	  replace them with a No-Op instruction) on boot up. At compile
	  time, a table is made of all the locations that ftrace can
	  function trace, and this table is linked into the kernel image.
	  When this is enabled, functions can be enabled individually,
	  and functions that are not enabled will not affect the
	  performance of the system.

	  See the files in /sys/kernel/debug/tracing:
	    available_filter_functions
	    set_ftrace_filter
	    set_ftrace_notrace

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.
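
	  For example, to limit function tracing to the scheduler (the
	  filter pattern is illustrative, tracefs path assumed):

	      echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
	      cat /sys/kernel/debug/tracing/set_ftrace_filter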

config DYNAMIC_FTRACE_WITH_REGS
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins; when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions
	  that have been hit and their counters.
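
	  A rough usage sketch (the per-CPU stat file names are assumed; on
	  many systems the results appear as trace_stat/function0,
	  function1, ...):

	      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	      # ... run the workload ...
	      echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
	      cat /sys/kernel/debug/tracing/trace_stat/function0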

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, a series of tests is run to verify that the tracer is
	  functioning properly. The tests cover all the configured tracers
	  of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option also enables testing of every syscall event.
	  It simply enables each event, runs various loads with the event
	  enabled, and disables it again. This adds a bit more time to
	  kernel boot up since it is run for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	        events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config TRACING_MAP
	bool
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	help
	  tracing_map is a special-purpose lock-free map for tracing,
	  separated out as a stand-alone facility in order to allow it
	  to be shared between multiple tracers.  It isn't meant to be
	  generally used outside of that context, and is normally
	  selected by tracers that use it.

config HIST_TRIGGERS
	bool "Histogram triggers"
	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
	select TRACING_MAP
	select TRACING
	default n
	help
	  Hist triggers allow one or more arbitrary trace event fields
	  to be aggregated into hash tables and dumped to stdout by
	  reading a debugfs/tracefs file.  They're useful for
	  gathering quick and dirty (though precise) summaries of
	  event activity as an initial guide for further investigation
	  using more advanced tools.
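
	  A rough sketch of a hist trigger (the event and key field are
	  illustrative; see the histogram section of the events
	  documentation for the full syntax):

	      echo 'hist:keys=common_pid' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
	      cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist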

	  See Documentation/trace/events.txt.
	  If in doubt, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config TRACEPOINT_BENCHMARK
	bool "Add tracepoint that benchmarks tracepoints"
	help
	  This option creates the tracepoint "benchmark:benchmark_event".
	  When the tracepoint is enabled, it kicks off a kernel thread that
	  goes into an infinite loop (calling cond_resched() to let other
	  tasks run), and calls the tracepoint. Each iteration records the
	  time it took to write to the tracepoint, and on the next iteration
	  that data is passed to the tracepoint itself. That is, the
	  tracepoint reports the time it took to do the previous tracepoint
	  write.
	  The string written to the tracepoint is a static string of 128
	  bytes to keep the time the same. The initial string is simply a
	  write of "START". The second string records the cold cache time of
	  the first write, which is not added to the rest of the
	  calculations.

	  As it is a tight loop, it benchmarks as hot cache. That's fine
	  because we care most about hot paths that are probably in cache
	  already.

	  An example of the output:

	      START
	      first=3672 [COLD CACHED]
	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666


config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark
	  it. It creates its own ring buffer such that it will not interfere
	  with any other users of the ring buffer (such as ftrace). It then
	  creates a producer and consumer that will run for 10 seconds and
	  sleep for 10 seconds. Each interval it will print out the number
	  of events it recorded and give a rough estimate of how long each
	  iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

config RING_BUFFER_STARTUP_TEST
	bool "Ring buffer startup self test"
	depends on RING_BUFFER
	help
	  Run a simple self test on the ring buffer on boot up. Late in the
	  kernel boot sequence, the test starts and kicks off one thread per
	  CPU. Each thread will write various size events into the ring
	  buffer. Another thread is created to send IPIs to each of the
	  threads, where the IPI handler will also write to the ring buffer,
	  to test/stress the nesting ability. If any anomalies are
	  discovered, a warning will be displayed and all ring buffers will
	  be disabled.

	  The test runs for 10 seconds. This will slow your boot time
	  by at least 10 more seconds.

	  At the end of the test, statistics and more checks are done.
	  It will output the stats of each per-CPU buffer: what was
	  written, the sizes, what was read, what was lost, and
	  other similar details.

	  If unsure, say N.

config TRACE_ENUM_MAP_FILE
	bool "Show enum mappings for trace events"
	depends on TRACING
	help
	  The "print fmt" of the trace events will show the enum names
	  instead of their values. This can cause problems for user space
	  tools that use this string to parse the raw data, as user space
	  does not know how to convert the string to its value.

	  To fix this, there's a special macro in the kernel that can be used
	  to convert the enum into its value. If this macro is used, then the
	  print fmt strings will have the enums converted to their values.

	  If something does not get converted properly, this option can be
	  used to show what enums the kernel tried to convert.

	  This option is for debugging the enum conversions. A file is
	  created in the tracing directory called "enum_map" that will show
	  the enum names matched with their values and what trace event
	  system they belong to.

	  Normally, the mapping of the strings to values will be freed after
	  boot up or module load. With this option, they will not be freed,
	  as they are needed for the "enum_map" file. Enabling this option
	  will increase the memory footprint of the running kernel.

	  If unsure, say N.

config TRACING_EVENTS_GPIO
	bool "Trace gpio events"
	depends on GPIOLIB
	default y
	help
	  Enable tracing events for the gpio subsystem.

endif # FTRACE

endif # TRACING_SUPPORT