xref: /openbmc/linux/kernel/trace/trace.h (revision aa5b395b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5 
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/trace.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/trace_seq.h>
17 #include <linux/trace_events.h>
18 #include <linux/compiler.h>
19 #include <linux/glob.h>
20 #include <linux/irq_work.h>
21 #include <linux/workqueue.h>
22 
23 #ifdef CONFIG_FTRACE_SYSCALLS
24 #include <asm/unistd.h>		/* For NR_SYSCALLS	     */
25 #include <asm/syscall.h>	/* some archs define it here */
26 #endif
27 
28 enum trace_type {
29 	__TRACE_FIRST_TYPE = 0,
30 
31 	TRACE_FN,
32 	TRACE_CTX,
33 	TRACE_WAKE,
34 	TRACE_STACK,
35 	TRACE_PRINT,
36 	TRACE_BPRINT,
37 	TRACE_MMIO_RW,
38 	TRACE_MMIO_MAP,
39 	TRACE_BRANCH,
40 	TRACE_GRAPH_RET,
41 	TRACE_GRAPH_ENT,
42 	TRACE_USER_STACK,
43 	TRACE_BLK,
44 	TRACE_BPUTS,
45 	TRACE_HWLAT,
46 	TRACE_RAW_DATA,
47 
48 	__TRACE_LAST_TYPE,
49 };
50 
51 
52 #undef __field
53 #define __field(type, item)		type	item;
54 
55 #undef __field_fn
56 #define __field_fn(type, item)		type	item;
57 
58 #undef __field_struct
59 #define __field_struct(type, item)	__field(type, item)
60 
61 #undef __field_desc
62 #define __field_desc(type, container, item)
63 
64 #undef __array
65 #define __array(type, item, size)	type	item[size];
66 
67 #undef __array_desc
68 #define __array_desc(type, container, item, size)
69 
70 #undef __dynamic_array
71 #define __dynamic_array(type, item)	type	item[];
72 
73 #undef F_STRUCT
74 #define F_STRUCT(args...)		args
75 
76 #undef FTRACE_ENTRY
77 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
78 	struct struct_name {						\
79 		struct trace_entry	ent;				\
80 		tstruct							\
81 	}
82 
83 #undef FTRACE_ENTRY_DUP
84 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
85 
86 #undef FTRACE_ENTRY_REG
87 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	regfn)	\
88 	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
89 
90 #undef FTRACE_ENTRY_PACKED
91 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
92 	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
93 
94 #include "trace_entries.h"
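/*
 * For example, the FTRACE_ENTRY() instance for the function tracer in
 * trace_entries.h expands here to roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */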
95 
96 /*
97  * Syscalls are special and need special handling; this is why
98  * they are not included in trace_entries.h.
99  */
100 struct syscall_trace_enter {
101 	struct trace_entry	ent;
102 	int			nr;
103 	unsigned long		args[];
104 };
105 
106 struct syscall_trace_exit {
107 	struct trace_entry	ent;
108 	int			nr;
109 	long			ret;
110 };
111 
112 struct kprobe_trace_entry_head {
113 	struct trace_entry	ent;
114 	unsigned long		ip;
115 };
116 
117 struct kretprobe_trace_entry_head {
118 	struct trace_entry	ent;
119 	unsigned long		func;
120 	unsigned long		ret_ip;
121 };
122 
123 /*
124  * trace_flag_type is an enumeration that holds different
125  * states when a trace occurs. These are:
126  *  IRQS_OFF		- interrupts were disabled
127  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
128  *  NEED_RESCHED	- reschedule is requested
129  *  HARDIRQ		- inside an interrupt handler
130  *  SOFTIRQ		- inside a softirq handler
131  */
132 enum trace_flag_type {
133 	TRACE_FLAG_IRQS_OFF		= 0x01,
134 	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
135 	TRACE_FLAG_NEED_RESCHED		= 0x04,
136 	TRACE_FLAG_HARDIRQ		= 0x08,
137 	TRACE_FLAG_SOFTIRQ		= 0x10,
138 	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
139 	TRACE_FLAG_NMI			= 0x40,
140 };
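/*
 * These flags are ORed together into the flags field of each trace
 * entry; e.g. an event recorded from a softirq with interrupts disabled
 * would carry (TRACE_FLAG_IRQS_OFF | TRACE_FLAG_SOFTIRQ).
 */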
141 
142 #define TRACE_BUF_SIZE		1024
143 
144 struct trace_array;
145 
146 /*
147  * The CPU trace array - it consists of thousands of trace entries
148  * plus some other descriptor data (for example, which task started
149  * the trace, etc.).
150  */
151 struct trace_array_cpu {
152 	atomic_t		disabled;
153 	void			*buffer_page;	/* ring buffer spare */
154 
155 	unsigned long		entries;
156 	unsigned long		saved_latency;
157 	unsigned long		critical_start;
158 	unsigned long		critical_end;
159 	unsigned long		critical_sequence;
160 	unsigned long		nice;
161 	unsigned long		policy;
162 	unsigned long		rt_priority;
163 	unsigned long		skipped_entries;
164 	u64			preempt_timestamp;
165 	pid_t			pid;
166 	kuid_t			uid;
167 	char			comm[TASK_COMM_LEN];
168 
169 	bool			ignore_pid;
170 #ifdef CONFIG_FUNCTION_TRACER
171 	bool			ftrace_ignore_pid;
172 #endif
173 };
174 
175 struct tracer;
176 struct trace_option_dentry;
177 
178 struct trace_buffer {
179 	struct trace_array		*tr;
180 	struct ring_buffer		*buffer;
181 	struct trace_array_cpu __percpu	*data;
182 	u64				time_start;
183 	int				cpu;
184 };
185 
186 #define TRACE_FLAGS_MAX_SIZE		32
187 
188 struct trace_options {
189 	struct tracer			*tracer;
190 	struct trace_option_dentry	*topts;
191 };
192 
193 struct trace_pid_list {
194 	int				pid_max;
195 	unsigned long			*pids;
196 };
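/*
 * The pids field is a bitmap indexed by pid and bounded by pid_max, so
 * a membership test is essentially (a sketch of what
 * trace_find_filtered_pid(), declared below, does):
 *
 *	if (search_pid < filtered_pids->pid_max &&
 *	    test_bit(search_pid, filtered_pids->pids))
 *		;	// pid is in the filtered list
 */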
197 
198 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
199 
200 /**
201  * struct cond_snapshot - conditional snapshot data and callback
202  *
203  * The cond_snapshot structure encapsulates a callback function and
204  * data associated with the snapshot for a given tracing instance.
205  *
206  * When a snapshot is taken conditionally, by invoking
207  * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
208  * passed in turn to the cond_snapshot.update() function.  That data
209  * can be compared by the update() implementation with the cond_data
210  * contained within the struct cond_snapshot instance associated with
211  * the trace_array.  Because the tr->max_lock is held throughout the
212  * update() call, the update() function can directly retrieve the
213  * cond_snapshot and cond_data associated with the per-instance
214  * snapshot associated with the trace_array.
215  *
216  * The cond_snapshot.update() implementation can save data to be
217  * associated with the snapshot if it decides to, and returns 'true'
218  * in that case, or it returns 'false' if the conditional snapshot
219  * shouldn't be taken.
220  *
221  * The cond_snapshot instance is created and associated with the
222  * user-defined cond_data by tracing_cond_snapshot_enable().
223  * Likewise, the cond_snapshot instance is destroyed and is no longer
224  * associated with the trace instance by
225  * tracing_cond_snapshot_disable().
226  *
227  * The method below is required.
228  *
229  * @update: When a conditional snapshot is invoked, the update()
230  *	callback function is invoked with the tr->max_lock held.  The
231  *	update() implementation signals whether or not to actually
232  *	take the snapshot, by returning 'true' if so, 'false' if no
233  *	snapshot should be taken.  Because the max_lock is held for
234  *	the duration of update(), the implementation is safe to
235  *	directly retrieve and save any implementation data it needs
236  *	to in association with the snapshot.
237  */
238 struct cond_snapshot {
239 	void				*cond_data;
240 	cond_update_fn_t		update;
241 };
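/*
 * A minimal sketch of a cond_update_fn_t implementation following the
 * contract above (struct my_cond_data, my_update() and the threshold
 * field are hypothetical):
 *
 *	struct my_cond_data {
 *		u64	threshold;
 *	};
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		u64 *val = cond_data;
 *		struct my_cond_data *data = tr->cond_snapshot->cond_data;
 *
 *		// tr->max_lock is held here, so this access is safe
 *		return data && *val > data->threshold;
 *	}
 */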
242 
243 /*
244  * The trace array - an array of per-CPU trace arrays. This is the
245  * highest level data structure that individual tracers deal with.
246  * They have on/off state as well:
247  */
248 struct trace_array {
249 	struct list_head	list;
250 	char			*name;
251 	struct trace_buffer	trace_buffer;
252 #ifdef CONFIG_TRACER_MAX_TRACE
253 	/*
254 	 * The max_buffer is used to snapshot the trace when a maximum
255 	 * latency is reached, or when the user initiates a snapshot.
256 	 * Some tracers will use this to store a maximum trace while
257 	 * it continues examining live traces.
258 	 *
259 	 * The buffers for the max_buffer are set up the same as the trace_buffer.
260 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
261 	 * with the buffer of the trace_buffer and the buffers are reset for
262 	 * the trace_buffer so the tracing can continue.
263 	 */
264 	struct trace_buffer	max_buffer;
265 	bool			allocated_snapshot;
266 #endif
267 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
268 	unsigned long		max_latency;
269 #ifdef CONFIG_FSNOTIFY
270 	struct dentry		*d_max_latency;
271 	struct work_struct	fsnotify_work;
272 	struct irq_work		fsnotify_irqwork;
273 #endif
274 #endif
275 	struct trace_pid_list	__rcu *filtered_pids;
276 	/*
277 	 * max_lock is used to protect the swapping of buffers
278 	 * when taking a max snapshot. The buffers themselves are
279 	 * protected by per_cpu spinlocks. But the action of the swap
280 	 * needs its own lock.
281 	 *
282 	 * This is defined as an arch_spinlock_t in order to help
283 	 * with performance when lockdep debugging is enabled.
284 	 *
285 	 * It is also used in other places outside of update_max_tr(),
286 	 * so it needs to be defined outside of the
287 	 * CONFIG_TRACER_MAX_TRACE block.
288 	 */
289 	arch_spinlock_t		max_lock;
290 	int			buffer_disabled;
291 #ifdef CONFIG_FTRACE_SYSCALLS
292 	int			sys_refcount_enter;
293 	int			sys_refcount_exit;
294 	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
295 	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
296 #endif
297 	int			stop_count;
298 	int			clock_id;
299 	int			nr_topts;
300 	bool			clear_trace;
301 	int			buffer_percent;
302 	unsigned int		n_err_log_entries;
303 	struct tracer		*current_trace;
304 	unsigned int		trace_flags;
305 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
306 	unsigned int		flags;
307 	raw_spinlock_t		start_lock;
308 	struct list_head	err_log;
309 	struct dentry		*dir;
310 	struct dentry		*options;
311 	struct dentry		*percpu_dir;
312 	struct dentry		*event_dir;
313 	struct trace_options	*topts;
314 	struct list_head	systems;
315 	struct list_head	events;
316 	struct trace_event_file *trace_marker_file;
317 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
318 	int			ref;
319 #ifdef CONFIG_FUNCTION_TRACER
320 	struct ftrace_ops	*ops;
321 	struct trace_pid_list	__rcu *function_pids;
322 #ifdef CONFIG_DYNAMIC_FTRACE
323 	/* All of these are protected by the ftrace_lock */
324 	struct list_head	func_probes;
325 	struct list_head	mod_trace;
326 	struct list_head	mod_notrace;
327 #endif
328 	/* function tracing enabled */
329 	int			function_enabled;
330 #endif
331 	int			time_stamp_abs_ref;
332 	struct list_head	hist_vars;
333 #ifdef CONFIG_TRACER_SNAPSHOT
334 	struct cond_snapshot	*cond_snapshot;
335 #endif
336 };
337 
338 enum {
339 	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
340 };
341 
342 extern struct list_head ftrace_trace_arrays;
343 
344 extern struct mutex trace_types_lock;
345 
346 extern int trace_array_get(struct trace_array *tr);
347 extern int tracing_check_open_get_tr(struct trace_array *tr);
348 
349 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
350 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
351 
352 extern bool trace_clock_in_ns(struct trace_array *tr);
353 
354 /*
355  * The global tracer (top) should be the first trace array added,
356  * but we check the flag anyway.
357  */
358 static inline struct trace_array *top_trace_array(void)
359 {
360 	struct trace_array *tr;
361 
362 	if (list_empty(&ftrace_trace_arrays))
363 		return NULL;
364 
365 	tr = list_entry(ftrace_trace_arrays.prev,
366 			typeof(*tr), list);
367 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
368 	return tr;
369 }
370 
371 #define FTRACE_CMP_TYPE(var, type) \
372 	__builtin_types_compatible_p(typeof(var), type *)
373 
374 #undef IF_ASSIGN
375 #define IF_ASSIGN(var, entry, etype, id)			\
376 	if (FTRACE_CMP_TYPE(var, etype)) {			\
377 		var = (typeof(var))(entry);			\
378 		WARN_ON(id != 0 && (entry)->type != id);	\
379 		break;						\
380 	}
381 
382 /* Will cause compile errors if type is not found. */
383 extern void __ftrace_bad_type(void);
384 
385 /*
386  * The trace_assign_type is a verifier that the entry type is
387  * the same as the type being assigned. To add new types simply
388  * add a line with the following format:
389  *
390  * IF_ASSIGN(var, ent, type, id);
391  *
392  *  Where "type" is the trace type that includes the trace_entry
393  *  as the "ent" item. And "id" is the trace identifier that is
394  *  used in the trace_type enum.
395  *
396  *  If the type can have more than one id, then use zero.
397  */
398 #define trace_assign_type(var, ent)					\
399 	do {								\
400 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
401 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
402 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
403 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
404 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
405 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
406 		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
407 		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
408 		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
409 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
410 			  TRACE_MMIO_RW);				\
411 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
412 			  TRACE_MMIO_MAP);				\
413 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
414 		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
415 			  TRACE_GRAPH_ENT);		\
416 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
417 			  TRACE_GRAPH_RET);		\
418 		__ftrace_bad_type();					\
419 	} while (0)
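/*
 * Example usage, as in the print functions of trace_output.c (a
 * sketch):
 *
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 */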
420 
421 /*
422  * An option specific to a tracer. This is a boolean value.
423  * The bit is the bit mask that sets its value in the
424  * flags value in struct tracer_flags.
425  */
426 struct tracer_opt {
427 	const char	*name; /* Will appear on the trace_options file */
428 	u32		bit; /* Mask assigned in val field in tracer_flags */
429 };
430 
431 /*
432  * The set of specific options for a tracer. Your tracer
433  * has to set the initial value of the flags val.
434  */
435 struct tracer_flags {
436 	u32			val;
437 	struct tracer_opt	*opts;
438 	struct tracer		*trace;
439 };
440 
441 /* Makes it easier to define a tracer opt */
442 #define TRACER_OPT(s, b)	.name = #s, .bit = b
443 
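/*
 * A sketch of how a tracer defines its private options with
 * TRACER_OPT(); the "myopt" name and TRACE_MYOPT bit are made up:
 *
 *	#define TRACE_MYOPT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(myopt, TRACE_MYOPT) },
 *		{ } // terminator entry
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0, // initial value of the option bits
 *		.opts = my_opts,
 *	};
 */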
444 
445 struct trace_option_dentry {
446 	struct tracer_opt		*opt;
447 	struct tracer_flags		*flags;
448 	struct trace_array		*tr;
449 	struct dentry			*entry;
450 };
451 
452 /**
453  * struct tracer - a specific tracer and its callbacks to interact with tracefs
454  * @name: the name chosen to select it on the available_tracers file
455  * @init: called when one switches to this tracer (echo name > current_tracer)
456  * @reset: called when one switches to another tracer
457  * @start: called when tracing is unpaused (echo 1 > tracing_on)
458  * @stop: called when tracing is paused (echo 0 > tracing_on)
459  * @update_thresh: called when tracing_thresh is updated
460  * @open: called when the trace file is opened
461  * @pipe_open: called when the trace_pipe file is opened
462  * @close: called when the trace file is released
463  * @pipe_close: called when the trace_pipe file is released
464  * @read: override the default read callback on trace_pipe
465  * @splice_read: override the default splice_read callback on trace_pipe
466  * @selftest: selftest to run on boot (see trace_selftest.c)
467  * @print_header: override the first lines that describe your columns
468  * @print_line: callback that prints a trace
469  * @set_flag: signals one of your private flags changed (trace_options file)
470  * @flags: your private flags
471  */
472 struct tracer {
473 	const char		*name;
474 	int			(*init)(struct trace_array *tr);
475 	void			(*reset)(struct trace_array *tr);
476 	void			(*start)(struct trace_array *tr);
477 	void			(*stop)(struct trace_array *tr);
478 	int			(*update_thresh)(struct trace_array *tr);
479 	void			(*open)(struct trace_iterator *iter);
480 	void			(*pipe_open)(struct trace_iterator *iter);
481 	void			(*close)(struct trace_iterator *iter);
482 	void			(*pipe_close)(struct trace_iterator *iter);
483 	ssize_t			(*read)(struct trace_iterator *iter,
484 					struct file *filp, char __user *ubuf,
485 					size_t cnt, loff_t *ppos);
486 	ssize_t			(*splice_read)(struct trace_iterator *iter,
487 					       struct file *filp,
488 					       loff_t *ppos,
489 					       struct pipe_inode_info *pipe,
490 					       size_t len,
491 					       unsigned int flags);
492 #ifdef CONFIG_FTRACE_STARTUP_TEST
493 	int			(*selftest)(struct tracer *trace,
494 					    struct trace_array *tr);
495 #endif
496 	void			(*print_header)(struct seq_file *m);
497 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
498 	/* If you handled the flag setting, return 0 */
499 	int			(*set_flag)(struct trace_array *tr,
500 					    u32 old_flags, u32 bit, int set);
501 	/* Return 0 if OK with change, else return non-zero */
502 	int			(*flag_changed)(struct trace_array *tr,
503 						u32 mask, int set);
504 	struct tracer		*next;
505 	struct tracer_flags	*flags;
506 	int			enabled;
507 	int			ref;
508 	bool			print_max;
509 	bool			allow_instances;
510 #ifdef CONFIG_TRACER_MAX_TRACE
511 	bool			use_max_tr;
512 #endif
513 	/* True if tracer cannot be enabled in kernel param */
514 	bool			noboot;
515 };
516 
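/*
 * A minimal tracer sketch wiring up the callbacks documented above
 * (names are hypothetical; see the nop tracer for a real minimal
 * example):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * The tracer is then registered with register_tracer(&my_tracer),
 * declared later in this header.
 */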
517 
518 /* Only current can touch trace_recursion */
519 
520 /*
521  * For function tracing recursion:
522  *  The order of these bits is important.
523  *
524  *  When function tracing occurs, the following steps are made:
525  *   If the arch does not support an ftrace feature:
526  *    call internal function (uses INTERNAL bits) which calls...
527  *   If callback is registered to the "global" list, the list
528  *    function is called and recursion checks the GLOBAL bits.
529  *    then this function calls...
530  *   The function callback, which can use the FTRACE bits to
531  *    check for recursion.
532  *
533  * Now if the arch does not support a feature, and it calls
534  * the global list function which calls the ftrace callback,
535  * all three of these steps will each perform a recursion check.
536  * There's no reason to do one if the previous caller already
537  * did. The recursion that we are protecting against will
538  * go through the same steps again.
539  *
540  * To prevent the multiple recursion checks, if a recursion
541  * bit is set that is higher than the MAX bit of the current
542  * check, then we know that the check was made by the previous
543  * caller, and we can skip the current check.
544  */
545 enum {
546 	TRACE_BUFFER_BIT,
547 	TRACE_BUFFER_NMI_BIT,
548 	TRACE_BUFFER_IRQ_BIT,
549 	TRACE_BUFFER_SIRQ_BIT,
550 
551 	/* Start of function recursion bits */
552 	TRACE_FTRACE_BIT,
553 	TRACE_FTRACE_NMI_BIT,
554 	TRACE_FTRACE_IRQ_BIT,
555 	TRACE_FTRACE_SIRQ_BIT,
556 
557 	/* INTERNAL_BITs must be greater than FTRACE_BITs */
558 	TRACE_INTERNAL_BIT,
559 	TRACE_INTERNAL_NMI_BIT,
560 	TRACE_INTERNAL_IRQ_BIT,
561 	TRACE_INTERNAL_SIRQ_BIT,
562 
563 	TRACE_BRANCH_BIT,
564 /*
565  * Abuse of the trace_recursion.
566  * We need a way to maintain state if we are tracing the function
567  * graph in irq context, because we want to trace a particular function
568  * that was called in irq context while irq tracing is off. Since this
569  * can only be modified by current, we can reuse trace_recursion.
570  */
571 	TRACE_IRQ_BIT,
572 
573 	/* Set if the function is in the set_graph_function file */
574 	TRACE_GRAPH_BIT,
575 
576 	/*
577 	 * In the very unlikely case that an interrupt came in
578 	 * at a start of graph tracing, and we want to trace
579 	 * the function in that interrupt, the depth can be greater
580 	 * than zero, because of the preempted start of a previous
581 	 * trace. In an even more unlikely case, depth could be 2
582 	 * if a softirq interrupted the start of graph tracing,
583 	 * followed by an interrupt preempting a start of graph
584 	 * tracing in the softirq, and depth can even be 3
585 	 * if an NMI came in at the start of an interrupt function
586 	 * that preempted a softirq start of a function that
587 	 * preempted normal context!!!! Luckily, it can't be
588 	 * greater than 3, so the next two bits are a mask
589 	 * of what the depth is when we set TRACE_GRAPH_BIT.
590 	 */
591 
592 	TRACE_GRAPH_DEPTH_START_BIT,
593 	TRACE_GRAPH_DEPTH_END_BIT,
594 
595 	/*
596 	 * To implement set_graph_notrace, if this bit is set, we ignore
597 	 * function graph tracing of called functions, until the return
598 	 * function is called to clear it.
599 	 */
600 	TRACE_GRAPH_NOTRACE_BIT,
601 };
602 
603 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
604 #define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
605 #define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
606 
607 #define trace_recursion_depth() \
608 	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
609 #define trace_recursion_set_depth(depth) \
610 	do {								\
611 		current->trace_recursion &=				\
612 			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
613 		current->trace_recursion |=				\
614 			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
615 	} while (0)
616 
617 #define TRACE_CONTEXT_BITS	4
618 
619 #define TRACE_FTRACE_START	TRACE_FTRACE_BIT
620 #define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
621 
622 #define TRACE_LIST_START	TRACE_INTERNAL_BIT
623 #define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
624 
625 #define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
626 
627 static __always_inline int trace_get_context_bit(void)
628 {
629 	int bit;
630 
631 	if (in_interrupt()) {
632 		if (in_nmi())
633 			bit = 0;
634 
635 		else if (in_irq())
636 			bit = 1;
637 		else
638 			bit = 2;
639 	} else
640 		bit = 3;
641 
642 	return bit;
643 }
644 
645 static __always_inline int trace_test_and_set_recursion(int start, int max)
646 {
647 	unsigned int val = current->trace_recursion;
648 	int bit;
649 
650 	/* A previous recursion check was made */
651 	if ((val & TRACE_CONTEXT_MASK) > max)
652 		return 0;
653 
654 	bit = trace_get_context_bit() + start;
655 	if (unlikely(val & (1 << bit)))
656 		return -1;
657 
658 	val |= 1 << bit;
659 	current->trace_recursion = val;
660 	barrier();
661 
662 	return bit;
663 }
664 
665 static __always_inline void trace_clear_recursion(int bit)
666 {
667 	unsigned int val = current->trace_recursion;
668 
669 	if (!bit)
670 		return;
671 
672 	bit = 1 << bit;
673 	val &= ~bit;
674 
675 	barrier();
676 	current->trace_recursion = val;
677 }
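/*
 * Typical usage pattern for the two helpers above, as in the function
 * tracer callbacks (a sketch):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;	// recursion detected, bail out
 *
 *	// ... do the tracing work ...
 *
 *	trace_clear_recursion(bit);
 */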
678 
679 static inline struct ring_buffer_iter *
680 trace_buffer_iter(struct trace_iterator *iter, int cpu)
681 {
682 	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
683 }
684 
685 int tracer_init(struct tracer *t, struct trace_array *tr);
686 int tracing_is_enabled(void);
687 void tracing_reset_online_cpus(struct trace_buffer *buf);
688 void tracing_reset_current(int cpu);
689 void tracing_reset_all_online_cpus(void);
690 int tracing_open_generic(struct inode *inode, struct file *filp);
691 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
692 bool tracing_is_disabled(void);
693 bool tracer_tracing_is_on(struct trace_array *tr);
694 void tracer_tracing_on(struct trace_array *tr);
695 void tracer_tracing_off(struct trace_array *tr);
696 struct dentry *trace_create_file(const char *name,
697 				 umode_t mode,
698 				 struct dentry *parent,
699 				 void *data,
700 				 const struct file_operations *fops);
701 
702 struct dentry *tracing_init_dentry(void);
703 
704 struct ring_buffer_event;
705 
706 struct ring_buffer_event *
707 trace_buffer_lock_reserve(struct ring_buffer *buffer,
708 			  int type,
709 			  unsigned long len,
710 			  unsigned long flags,
711 			  int pc);
712 
713 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
714 						struct trace_array_cpu *data);
715 
716 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
717 					  int *ent_cpu, u64 *ent_ts);
718 
719 void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
720 					struct ring_buffer_event *event);
721 
722 int trace_empty(struct trace_iterator *iter);
723 
724 void *trace_find_next_entry_inc(struct trace_iterator *iter);
725 
726 void trace_init_global_iter(struct trace_iterator *iter);
727 
728 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
729 
730 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
731 unsigned long trace_total_entries(struct trace_array *tr);
732 
733 void trace_function(struct trace_array *tr,
734 		    unsigned long ip,
735 		    unsigned long parent_ip,
736 		    unsigned long flags, int pc);
737 void trace_graph_function(struct trace_array *tr,
738 		    unsigned long ip,
739 		    unsigned long parent_ip,
740 		    unsigned long flags, int pc);
741 void trace_latency_header(struct seq_file *m);
742 void trace_default_header(struct seq_file *m);
743 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
744 int trace_empty(struct trace_iterator *iter);
745 
746 void trace_graph_return(struct ftrace_graph_ret *trace);
747 int trace_graph_entry(struct ftrace_graph_ent *trace);
748 void set_graph_array(struct trace_array *tr);
749 
750 void tracing_start_cmdline_record(void);
751 void tracing_stop_cmdline_record(void);
752 void tracing_start_tgid_record(void);
753 void tracing_stop_tgid_record(void);
754 
755 int register_tracer(struct tracer *type);
756 int is_tracing_stopped(void);
757 
758 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
759 
760 extern cpumask_var_t __read_mostly tracing_buffer_mask;
761 
762 #define for_each_tracing_cpu(cpu)	\
763 	for_each_cpu(cpu, tracing_buffer_mask)
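/*
 * Example: walk the CPUs that are being traced (a sketch of the
 * pattern used by the reset paths in trace.c):
 *
 *	struct trace_buffer *buf = &tr->trace_buffer;
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		atomic_inc(&per_cpu_ptr(buf->data, cpu)->disabled);
 */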
764 
765 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
766 
767 extern unsigned long tracing_thresh;
768 
769 /* PID filtering */
770 
771 extern int pid_max;
772 
773 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
774 			     pid_t search_pid);
775 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
776 			    struct task_struct *task);
777 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
778 				  struct task_struct *self,
779 				  struct task_struct *task);
780 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
781 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
782 int trace_pid_show(struct seq_file *m, void *v);
783 void trace_free_pid_list(struct trace_pid_list *pid_list);
784 int trace_pid_write(struct trace_pid_list *filtered_pids,
785 		    struct trace_pid_list **new_pid_list,
786 		    const char __user *ubuf, size_t cnt);
787 
788 #ifdef CONFIG_TRACER_MAX_TRACE
789 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
790 		   void *cond_data);
791 void update_max_tr_single(struct trace_array *tr,
792 			  struct task_struct *tsk, int cpu);
793 #endif /* CONFIG_TRACER_MAX_TRACE */
794 
795 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
796 	defined(CONFIG_FSNOTIFY)
797 
798 void latency_fsnotify(struct trace_array *tr);
799 
800 #else
801 
802 static inline void latency_fsnotify(struct trace_array *tr) { }
803 
804 #endif
805 
806 #ifdef CONFIG_STACKTRACE
807 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
808 		   int pc);
809 #else
810 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
811 				 int skip, int pc)
812 {
813 }
814 #endif /* CONFIG_STACKTRACE */
815 
816 extern u64 ftrace_now(int cpu);
817 
818 extern void trace_find_cmdline(int pid, char comm[]);
819 extern int trace_find_tgid(int pid);
820 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
821 
822 #ifdef CONFIG_DYNAMIC_FTRACE
823 extern unsigned long ftrace_update_tot_cnt;
824 extern unsigned long ftrace_number_of_pages;
825 extern unsigned long ftrace_number_of_groups;
826 void ftrace_init_trace_array(struct trace_array *tr);
827 #else
828 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
829 #endif
830 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
831 extern int DYN_FTRACE_TEST_NAME(void);
832 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
833 extern int DYN_FTRACE_TEST_NAME2(void);
834 
835 extern bool ring_buffer_expanded;
836 extern bool tracing_selftest_disabled;
837 
838 #ifdef CONFIG_FTRACE_STARTUP_TEST
839 extern int trace_selftest_startup_function(struct tracer *trace,
840 					   struct trace_array *tr);
841 extern int trace_selftest_startup_function_graph(struct tracer *trace,
842 						 struct trace_array *tr);
843 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
844 					  struct trace_array *tr);
845 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
846 					     struct trace_array *tr);
847 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
848 						 struct trace_array *tr);
849 extern int trace_selftest_startup_wakeup(struct tracer *trace,
850 					 struct trace_array *tr);
851 extern int trace_selftest_startup_nop(struct tracer *trace,
852 					 struct trace_array *tr);
853 extern int trace_selftest_startup_branch(struct tracer *trace,
854 					 struct trace_array *tr);
855 /*
856  * Tracer data references selftest functions that are only used
857  * at boot up. These can be __init functions. Thus, when selftests
858  * are enabled, the tracers need to reference __init functions.
859  */
860 #define __tracer_data		__refdata
861 #else
862 /* Tracers are seldom changed. Optimize when selftests are disabled. */
863 #define __tracer_data		__read_mostly
864 #endif /* CONFIG_FTRACE_STARTUP_TEST */
865 
866 extern void *head_page(struct trace_array_cpu *data);
867 extern unsigned long long ns2usecs(u64 nsec);
868 extern int
869 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
870 extern int
871 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
872 extern int
873 trace_array_vprintk(struct trace_array *tr,
874 		    unsigned long ip, const char *fmt, va_list args);
875 int trace_array_printk_buf(struct ring_buffer *buffer,
876 			   unsigned long ip, const char *fmt, ...);
877 void trace_printk_seq(struct trace_seq *s);
878 enum print_line_t print_trace_line(struct trace_iterator *iter);
879 
880 extern char trace_find_mark(unsigned long long duration);
881 
882 struct ftrace_hash;
883 
884 struct ftrace_mod_load {
885 	struct list_head	list;
886 	char			*func;
887 	char			*module;
888 	int			 enable;
889 };
890 
891 enum {
892 	FTRACE_HASH_FL_MOD	= (1 << 0),
893 };
894 
895 struct ftrace_hash {
896 	unsigned long		size_bits;
897 	struct hlist_head	*buckets;
898 	unsigned long		count;
899 	unsigned long		flags;
900 	struct rcu_head		rcu;
901 };
902 
903 struct ftrace_func_entry *
904 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
905 
906 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
907 {
908 	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
909 }
910 
911 /* Standard output formatting function used for function return traces */
912 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
913 
914 /* Flag options */
915 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
916 #define TRACE_GRAPH_PRINT_CPU           0x2
917 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
918 #define TRACE_GRAPH_PRINT_PROC          0x8
919 #define TRACE_GRAPH_PRINT_DURATION      0x10
920 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
921 #define TRACE_GRAPH_PRINT_REL_TIME      0x40
922 #define TRACE_GRAPH_PRINT_IRQS          0x80
923 #define TRACE_GRAPH_PRINT_TAIL          0x100
924 #define TRACE_GRAPH_SLEEP_TIME          0x200
925 #define TRACE_GRAPH_GRAPH_TIME          0x400
926 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
927 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
928 
929 extern void ftrace_graph_sleep_time_control(bool enable);
930 
931 #ifdef CONFIG_FUNCTION_PROFILER
932 extern void ftrace_graph_graph_time_control(bool enable);
933 #else
934 static inline void ftrace_graph_graph_time_control(bool enable) { }
935 #endif
936 
937 extern enum print_line_t
938 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
939 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
940 extern void
941 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
942 extern void graph_trace_open(struct trace_iterator *iter);
943 extern void graph_trace_close(struct trace_iterator *iter);
944 extern int __trace_graph_entry(struct trace_array *tr,
945 			       struct ftrace_graph_ent *trace,
946 			       unsigned long flags, int pc);
947 extern void __trace_graph_return(struct trace_array *tr,
948 				 struct ftrace_graph_ret *trace,
949 				 unsigned long flags, int pc);
950 
951 #ifdef CONFIG_DYNAMIC_FTRACE
952 extern struct ftrace_hash *ftrace_graph_hash;
953 extern struct ftrace_hash *ftrace_graph_notrace_hash;
954 
955 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
956 {
957 	unsigned long addr = trace->func;
958 	int ret = 0;
959 
960 	preempt_disable_notrace();
961 
962 	if (ftrace_hash_empty(ftrace_graph_hash)) {
963 		ret = 1;
964 		goto out;
965 	}
966 
967 	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
968 
969 		/*
970 		 * This needs to be cleared on the return functions
971 		 * when the depth is zero.
972 		 */
973 		trace_recursion_set(TRACE_GRAPH_BIT);
974 		trace_recursion_set_depth(trace->depth);
975 
976 		/*
977 		 * If no irqs are to be traced, but a set_graph_function
978 		 * is set, and called by an interrupt handler, we still
979 		 * want to trace it.
980 		 */
981 		if (in_irq())
982 			trace_recursion_set(TRACE_IRQ_BIT);
983 		else
984 			trace_recursion_clear(TRACE_IRQ_BIT);
985 		ret = 1;
986 	}
987 
988 out:
989 	preempt_enable_notrace();
990 	return ret;
991 }
992 
993 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
994 {
995 	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
996 	    trace->depth == trace_recursion_depth())
997 		trace_recursion_clear(TRACE_GRAPH_BIT);
998 }
999 
1000 static inline int ftrace_graph_notrace_addr(unsigned long addr)
1001 {
1002 	int ret = 0;
1003 
1004 	preempt_disable_notrace();
1005 
1006 	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
1007 		ret = 1;
1008 
1009 	preempt_enable_notrace();
1010 	return ret;
1011 }
1012 #else
1013 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
1014 {
1015 	return 1;
1016 }
1017 
1018 static inline int ftrace_graph_notrace_addr(unsigned long addr)
1019 {
1020 	return 0;
1021 }
1022 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1023 { }
1024 #endif /* CONFIG_DYNAMIC_FTRACE */
1025 
1026 extern unsigned int fgraph_max_depth;
1027 
1028 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
1029 {
1030 	/* trace it when it is nested in an enabled function, or is one itself. */
1031 	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
1032 		 ftrace_graph_addr(trace)) ||
1033 		(trace->depth < 0) ||
1034 		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
1035 }
1036 
1037 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
1038 static inline enum print_line_t
1039 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1040 {
1041 	return TRACE_TYPE_UNHANDLED;
1042 }
1043 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1044 
1045 extern struct list_head ftrace_pids;
1046 
1047 #ifdef CONFIG_FUNCTION_TRACER
1048 struct ftrace_func_command {
1049 	struct list_head	list;
1050 	char			*name;
1051 	int			(*func)(struct trace_array *tr,
1052 					struct ftrace_hash *hash,
1053 					char *func, char *cmd,
1054 					char *params, int enable);
1055 };
1056 extern bool ftrace_filter_param __initdata;
1057 static inline int ftrace_trace_task(struct trace_array *tr)
1058 {
1059 	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
1060 }
1061 extern int ftrace_is_dead(void);
1062 int ftrace_create_function_files(struct trace_array *tr,
1063 				 struct dentry *parent);
1064 void ftrace_destroy_function_files(struct trace_array *tr);
1065 void ftrace_init_global_array_ops(struct trace_array *tr);
1066 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1067 void ftrace_reset_array_ops(struct trace_array *tr);
1068 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1069 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1070 				  struct dentry *d_tracer);
1071 void ftrace_clear_pids(struct trace_array *tr);
1072 int init_function_trace(void);
1073 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1074 #else
1075 static inline int ftrace_trace_task(struct trace_array *tr)
1076 {
1077 	return 1;
1078 }
1079 static inline int ftrace_is_dead(void) { return 0; }
1080 static inline int
1081 ftrace_create_function_files(struct trace_array *tr,
1082 			     struct dentry *parent)
1083 {
1084 	return 0;
1085 }
1086 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1087 static inline __init void
1088 ftrace_init_global_array_ops(struct trace_array *tr) { }
1089 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1090 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1091 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1092 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1093 static inline int init_function_trace(void) { return 0; }
1094 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1095 /* ftrace_func_t type is not defined, use macro instead of static inline */
1096 #define ftrace_init_array_ops(tr, func) do { } while (0)
1097 #endif /* CONFIG_FUNCTION_TRACER */
1098 
1099 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1100 
1101 struct ftrace_probe_ops {
1102 	void			(*func)(unsigned long ip,
1103 					unsigned long parent_ip,
1104 					struct trace_array *tr,
1105 					struct ftrace_probe_ops *ops,
1106 					void *data);
1107 	int			(*init)(struct ftrace_probe_ops *ops,
1108 					struct trace_array *tr,
1109 					unsigned long ip, void *init_data,
1110 					void **data);
1111 	void			(*free)(struct ftrace_probe_ops *ops,
1112 					struct trace_array *tr,
1113 					unsigned long ip, void *data);
1114 	int			(*print)(struct seq_file *m,
1115 					 unsigned long ip,
1116 					 struct ftrace_probe_ops *ops,
1117 					 void *data);
1118 };
1119 
1120 struct ftrace_func_mapper;
1121 typedef int (*ftrace_mapper_func)(void *data);
1122 
1123 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1124 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1125 					   unsigned long ip);
1126 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1127 			       unsigned long ip, void *data);
1128 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1129 				   unsigned long ip);
1130 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1131 			     ftrace_mapper_func free_func);
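/*
 * Sketch of the ftrace_func_mapper lifecycle used by probe commands
 * (my_free_func is hypothetical):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **val;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, data);
 *	val = ftrace_func_mapper_find_ip(mapper, ip);
 *	...
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */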
1132 
1133 extern int
1134 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1135 			       struct ftrace_probe_ops *ops, void *data);
1136 extern int
1137 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1138 				      struct ftrace_probe_ops *ops);
1139 extern void clear_ftrace_function_probes(struct trace_array *tr);
1140 
1141 int register_ftrace_command(struct ftrace_func_command *cmd);
1142 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1143 
1144 void ftrace_create_filter_files(struct ftrace_ops *ops,
1145 				struct dentry *parent);
1146 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1147 #else
1148 struct ftrace_func_command;
1149 
1150 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1151 {
1152 	return -EINVAL;
1153 }
1154 static inline __init int unregister_ftrace_command(char *cmd_name)
1155 {
1156 	return -EINVAL;
1157 }
1158 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1159 {
1160 }
1161 
1162 /*
1163  * The ops parameter passed in is usually undefined.
1164  * This must be a macro.
1165  */
1166 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1167 #define ftrace_destroy_filter_files(ops) do { } while (0)
1168 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1169 
1170 bool ftrace_event_is_function(struct trace_event_call *call);
1171 
1172 /*
1173  * struct trace_parser - serves for reading the user input separated by spaces
1174  * @cont: set if the input is not complete - no final space char was found
1175  * @buffer: holds the parsed user input
1176  * @idx: user input length
1177  * @size: buffer size
1178  */
1179 struct trace_parser {
1180 	bool		cont;
1181 	char		*buffer;
1182 	unsigned	idx;
1183 	unsigned	size;
1184 };
1185 
1186 static inline bool trace_parser_loaded(struct trace_parser *parser)
1187 {
1188 	return (parser->idx != 0);
1189 }
1190 
1191 static inline bool trace_parser_cont(struct trace_parser *parser)
1192 {
1193 	return parser->cont;
1194 }
1195 
1196 static inline void trace_parser_clear(struct trace_parser *parser)
1197 {
1198 	parser->cont = false;
1199 	parser->idx = 0;
1200 }
1201 
1202 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1203 extern void trace_parser_put(struct trace_parser *parser);
1204 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1205 	size_t cnt, loff_t *ppos);
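/*
 * Sketch of how a write handler typically drives the parser (cf. the
 * set_ftrace_filter write path; do_something() is hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		do_something(parser.buffer);	// one space-separated token
 *
 *	trace_parser_put(&parser);
 */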
1206 
1207 /*
1208  * Only create function graph options if function graph is configured.
1209  */
1210 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1211 # define FGRAPH_FLAGS						\
1212 		C(DISPLAY_GRAPH,	"display-graph"),
1213 #else
1214 # define FGRAPH_FLAGS
1215 #endif
1216 
1217 #ifdef CONFIG_BRANCH_TRACER
1218 # define BRANCH_FLAGS					\
1219 		C(BRANCH,		"branch"),
1220 #else
1221 # define BRANCH_FLAGS
1222 #endif
1223 
1224 #ifdef CONFIG_FUNCTION_TRACER
1225 # define FUNCTION_FLAGS						\
1226 		C(FUNCTION,		"function-trace"),	\
1227 		C(FUNC_FORK,		"function-fork"),
1228 # define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
1229 #else
1230 # define FUNCTION_FLAGS
1231 # define FUNCTION_DEFAULT_FLAGS		0UL
1232 # define TRACE_ITER_FUNC_FORK		0UL
1233 #endif
1234 
1235 #ifdef CONFIG_STACKTRACE
1236 # define STACK_FLAGS				\
1237 		C(STACKTRACE,		"stacktrace"),
1238 #else
1239 # define STACK_FLAGS
1240 #endif
1241 
1242 /*
1243  * trace_iterator_flags is an enumeration that defines bit
1244  * positions into trace_flags that controls the output.
1245  *
1246  * NOTE: These bits must match the trace_options array in
1247  *       trace.c (this macro guarantees it).
1248  */
1249 #define TRACE_FLAGS						\
1250 		C(PRINT_PARENT,		"print-parent"),	\
1251 		C(SYM_OFFSET,		"sym-offset"),		\
1252 		C(SYM_ADDR,		"sym-addr"),		\
1253 		C(VERBOSE,		"verbose"),		\
1254 		C(RAW,			"raw"),			\
1255 		C(HEX,			"hex"),			\
1256 		C(BIN,			"bin"),			\
1257 		C(BLOCK,		"block"),		\
1258 		C(PRINTK,		"trace_printk"),	\
1259 		C(ANNOTATE,		"annotate"),		\
1260 		C(USERSTACKTRACE,	"userstacktrace"),	\
1261 		C(SYM_USEROBJ,		"sym-userobj"),		\
1262 		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
1263 		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
1264 		C(LATENCY_FMT,		"latency-format"),	\
1265 		C(RECORD_CMD,		"record-cmd"),		\
1266 		C(RECORD_TGID,		"record-tgid"),		\
1267 		C(OVERWRITE,		"overwrite"),		\
1268 		C(STOP_ON_FREE,		"disable_on_free"),	\
1269 		C(IRQ_INFO,		"irq-info"),		\
1270 		C(MARKERS,		"markers"),		\
1271 		C(EVENT_FORK,		"event-fork"),		\
1272 		FUNCTION_FLAGS					\
1273 		FGRAPH_FLAGS					\
1274 		STACK_FLAGS					\
1275 		BRANCH_FLAGS
1276 
1277 /*
1278  * By defining C, we can make TRACE_FLAGS a list of bit names
1279  * that will define the bits for the flag masks.
1280  */
1281 #undef C
1282 #define C(a, b) TRACE_ITER_##a##_BIT
1283 
1284 enum trace_iterator_bits {
1285 	TRACE_FLAGS
1286 	/* Make sure we don't go more than we have bits for */
1287 	TRACE_ITER_LAST_BIT
1288 };
1289 
1290 /*
1291  * By redefining C, we can make TRACE_FLAGS a list of masks that
1292  * use the bits as defined above.
1293  */
1294 #undef C
1295 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1296 
1297 enum trace_iterator_flags { TRACE_FLAGS };
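/*
 * For example, C(PRINT_PARENT, "print-parent") expands first to
 * TRACE_ITER_PRINT_PARENT_BIT in trace_iterator_bits, then to
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT) here,
 * and trace.c reuses the same list to build the option name strings.
 */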
1298 
1299 /*
1300  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1301  * control the output of kernel symbols.
1302  */
1303 #define TRACE_ITER_SYM_MASK \
1304 	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1305 
1306 extern struct tracer nop_trace;
1307 
1308 #ifdef CONFIG_BRANCH_TRACER
1309 extern int enable_branch_tracing(struct trace_array *tr);
1310 extern void disable_branch_tracing(void);
1311 static inline int trace_branch_enable(struct trace_array *tr)
1312 {
1313 	if (tr->trace_flags & TRACE_ITER_BRANCH)
1314 		return enable_branch_tracing(tr);
1315 	return 0;
1316 }
1317 static inline void trace_branch_disable(void)
1318 {
1319 	/* due to races, always disable */
1320 	disable_branch_tracing();
1321 }
1322 #else
1323 static inline int trace_branch_enable(struct trace_array *tr)
1324 {
1325 	return 0;
1326 }
1327 static inline void trace_branch_disable(void)
1328 {
1329 }
1330 #endif /* CONFIG_BRANCH_TRACER */
1331 
1332 /* set ring buffers to default size if not already done so */
1333 int tracing_update_buffers(void);
1334 
1335 struct ftrace_event_field {
1336 	struct list_head	link;
1337 	const char		*name;
1338 	const char		*type;
1339 	int			filter_type;
1340 	int			offset;
1341 	int			size;
1342 	int			is_signed;
1343 };
1344 
1345 struct prog_entry;
1346 
1347 struct event_filter {
1348 	struct prog_entry __rcu	*prog;
1349 	char			*filter_string;
1350 };
1351 
1352 struct event_subsystem {
1353 	struct list_head	list;
1354 	const char		*name;
1355 	struct event_filter	*filter;
1356 	int			ref_count;
1357 };
1358 
1359 struct trace_subsystem_dir {
1360 	struct list_head		list;
1361 	struct event_subsystem		*subsystem;
1362 	struct trace_array		*tr;
1363 	struct dentry			*entry;
1364 	int				ref_count;
1365 	int				nr_events;
1366 };
1367 
1368 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1369 				     struct ring_buffer *buffer,
1370 				     struct ring_buffer_event *event);
1371 
1372 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1373 				     struct ring_buffer *buffer,
1374 				     struct ring_buffer_event *event,
1375 				     unsigned long flags, int pc,
1376 				     struct pt_regs *regs);
1377 
1378 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1379 					      struct ring_buffer *buffer,
1380 					      struct ring_buffer_event *event,
1381 					      unsigned long flags, int pc)
1382 {
1383 	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1384 }
1385 
1386 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1387 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1388 void trace_buffered_event_disable(void);
1389 void trace_buffered_event_enable(void);
1390 
1391 static inline void
1392 __trace_event_discard_commit(struct ring_buffer *buffer,
1393 			     struct ring_buffer_event *event)
1394 {
1395 	if (this_cpu_read(trace_buffered_event) == event) {
1396 		/* Simply release the temp buffer */
1397 		this_cpu_dec(trace_buffered_event_cnt);
1398 		return;
1399 	}
1400 	ring_buffer_discard_commit(buffer, event);
1401 }
1402 
1403 /*
1404  * Helper function for event_trigger_unlock_commit{_regs}().
1405  * If there are event triggers attached to this event that require
1406  * filtering against its fields, they will be called, as the
1407  * entry already holds the field information of the current event.
1408  *
1409  * It also checks if the event should be discarded or not.
1410  * It is to be discarded if the event is soft disabled and the
1411  * event was only recorded to process triggers, or if the event
1412  * filter is active and this event did not match the filters.
1413  *
1414  * Returns true if the event is discarded, false otherwise.
1415  */
1416 static inline bool
1417 __event_trigger_test_discard(struct trace_event_file *file,
1418 			     struct ring_buffer *buffer,
1419 			     struct ring_buffer_event *event,
1420 			     void *entry,
1421 			     enum event_trigger_type *tt)
1422 {
1423 	unsigned long eflags = file->flags;
1424 
1425 	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1426 		*tt = event_triggers_call(file, entry, event);
1427 
1428 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1429 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1430 	     !filter_match_preds(file->filter, entry))) {
1431 		__trace_event_discard_commit(buffer, event);
1432 		return true;
1433 	}
1434 
1435 	return false;
1436 }
1437 
1438 /**
1439  * event_trigger_unlock_commit - handle triggers and finish event commit
1440  * @file: The file pointer associated with the event
1441  * @buffer: The ring buffer that the event is being written to
1442  * @event: The event meta data in the ring buffer
1443  * @entry: The event itself
1444  * @irq_flags: The state of the interrupts at the start of the event
1445  * @pc: The state of the preempt count at the start of the event.
1446  *
1447  * This is a helper function to handle triggers that require data
1448  * from the event itself. It also tests the event against filters and
1449  * if the event is soft disabled and should be discarded.
1450  */
1451 static inline void
1452 event_trigger_unlock_commit(struct trace_event_file *file,
1453 			    struct ring_buffer *buffer,
1454 			    struct ring_buffer_event *event,
1455 			    void *entry, unsigned long irq_flags, int pc)
1456 {
1457 	enum event_trigger_type tt = ETT_NONE;
1458 
1459 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1460 		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1461 
1462 	if (tt)
1463 		event_triggers_post_call(file, tt);
1464 }
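/*
 * Sketch of the commit path in an event probe using the helper above
 * (field setup and variable declarations elided):
 *
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in the entry's fields ...
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */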
1465 
1466 /**
1467  * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1468  * @file: The file pointer associated with the event
1469  * @buffer: The ring buffer that the event is being written to
1470  * @event: The event meta data in the ring buffer
1471  * @entry: The event itself
1472  * @irq_flags: The state of the interrupts at the start of the event
1473  * @pc: The state of the preempt count at the start of the event.
1474  *
1475  * This is a helper function to handle triggers that require data
1476  * from the event itself. It also tests the event against filters and
1477  * if the event is soft disabled and should be discarded.
1478  *
1479  * Same as event_trigger_unlock_commit() but calls
1480  * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1481  */
1482 static inline void
1483 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1484 				 struct ring_buffer *buffer,
1485 				 struct ring_buffer_event *event,
1486 				 void *entry, unsigned long irq_flags, int pc,
1487 				 struct pt_regs *regs)
1488 {
1489 	enum event_trigger_type tt = ETT_NONE;
1490 
1491 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1492 		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1493 						irq_flags, pc, regs);
1494 
1495 	if (tt)
1496 		event_triggers_post_call(file, tt);
1497 }
1498 
1499 #define FILTER_PRED_INVALID	((unsigned short)-1)
1500 #define FILTER_PRED_IS_RIGHT	(1 << 15)
1501 #define FILTER_PRED_FOLD	(1 << 15)
1502 
1503 /*
1504  * The max number of preds is limited by the size of an unsigned short,
1505  * with two flag bits at the MSBs. One bit is used for both the IS_RIGHT
1506  * and FOLD flags. The other is reserved.
1507  *
1508  * 2^14 preds is way more than enough.
1509  */
1510 #define MAX_FILTER_PRED		16384
1511 
1512 struct filter_pred;
1513 struct regex;
1514 
1515 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1516 
1517 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1518 
1519 enum regex_type {
1520 	MATCH_FULL = 0,
1521 	MATCH_FRONT_ONLY,
1522 	MATCH_MIDDLE_ONLY,
1523 	MATCH_END_ONLY,
1524 	MATCH_GLOB,
1525 	MATCH_INDEX,
1526 };
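/*
 * Illustrative examples of how filter_parse_regex() (declared below)
 * classifies patterns:
 *
 *	"foo"	-> MATCH_FULL
 *	"foo*"	-> MATCH_FRONT_ONLY
 *	"*foo"	-> MATCH_END_ONLY
 *	"*foo*"	-> MATCH_MIDDLE_ONLY
 *	"f*o"	-> MATCH_GLOB
 */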
1527 
1528 struct regex {
1529 	char			pattern[MAX_FILTER_STR_VAL];
1530 	int			len;
1531 	int			field_len;
1532 	regex_match_func	match;
1533 };
1534 
1535 struct filter_pred {
1536 	filter_pred_fn_t 	fn;
1537 	u64 			val;
1538 	struct regex		regex;
1539 	unsigned short		*ops;
1540 	struct ftrace_event_field *field;
1541 	int 			offset;
1542 	int			not;
1543 	int 			op;
1544 };
1545 
1546 static inline bool is_string_field(struct ftrace_event_field *field)
1547 {
1548 	return field->filter_type == FILTER_DYN_STRING ||
1549 	       field->filter_type == FILTER_STATIC_STRING ||
1550 	       field->filter_type == FILTER_PTR_STRING ||
1551 	       field->filter_type == FILTER_COMM;
1552 }
1553 
1554 static inline bool is_function_field(struct ftrace_event_field *field)
1555 {
1556 	return field->filter_type == FILTER_TRACE_FN;
1557 }
1558 
1559 extern enum regex_type
1560 filter_parse_regex(char *buff, int len, char **search, int *not);
1561 extern void print_event_filter(struct trace_event_file *file,
1562 			       struct trace_seq *s);
1563 extern int apply_event_filter(struct trace_event_file *file,
1564 			      char *filter_string);
1565 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1566 					char *filter_string);
1567 extern void print_subsystem_event_filter(struct event_subsystem *system,
1568 					 struct trace_seq *s);
1569 extern int filter_assign_type(const char *type);
1570 extern int create_event_filter(struct trace_array *tr,
1571 			       struct trace_event_call *call,
1572 			       char *filter_str, bool set_str,
1573 			       struct event_filter **filterp);
1574 extern void free_event_filter(struct event_filter *filter);
1575 
1576 struct ftrace_event_field *
1577 trace_find_event_field(struct trace_event_call *call, char *name);
1578 
1579 extern void trace_event_enable_cmd_record(bool enable);
1580 extern void trace_event_enable_tgid_record(bool enable);
1581 
1582 extern int event_trace_init(void);
1583 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1584 extern int event_trace_del_tracer(struct trace_array *tr);
1585 
1586 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1587 						  const char *system,
1588 						  const char *event);
1589 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1590 						const char *system,
1591 						const char *event);
1592 
1593 static inline void *event_file_data(struct file *filp)
1594 {
1595 	return READ_ONCE(file_inode(filp)->i_private);
1596 }
1597 
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
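
/*
 * Illustrative sketch only (not compiled): how a command's @reg
 * implementation might share state through the named-trigger API
 * above.  The "foo" name and the surrounding context (data, ret) are
 * hypothetical.
 */
#if 0
	struct event_trigger_data *named;

	named = find_named_trigger("foo");
	if (named) {
		/* reuse the existing trigger's shared state */
		set_named_trigger_data(data, named);
	} else {
		/* first user of this name saves its data for others */
		ret = save_named_trigger("foo", data);
	}
#endif
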
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command), along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec,
					struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

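/*
 * Illustrative sketch only (not compiled): a minimal event_trigger_ops
 * instance wiring up the callbacks above.  The my_* names are
 * hypothetical; the real generic implementations live in
 * trace_events_trigger.c.
 */
#if 0
static void my_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *rbe)
{
	if (!data->count)
		return;			/* trigger count exhausted */
	if (data->count != -1)
		data->count--;		/* -1 means "unlimited" */
	/* perform the actual trigger action here */
}

static int my_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			    struct event_trigger_data *data)
{
	seq_puts(m, "my_trigger");
	if (data->count != -1)
		seq_printf(m, ":count=%ld", data->count);
	seq_putc(m, '\n');
	return 0;
}

static void my_trigger_free(struct event_trigger_ops *ops,
			    struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;
	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

static struct event_trigger_ops my_trigger_ops = {
	.func	= my_trigger,
	.print	= my_trigger_print,
	.init	= event_trigger_init,	/* generic refcounting init */
	.free	= my_trigger_free,
};
#endif
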
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes: the first is to ensure
 *	that only one trigger of the same type can be set at a given
 *	time for a particular event; e.g. it doesn't make sense to
 *	have both a traceon and a traceoff trigger attached to a
 *	single event at the same time, so traceon and traceoff have
 *	the same type though they have different names.  The
 *	@trigger_type value is also used as a bit value for deferring
 *	the actual trigger action until after the current event is
 *	finished.  Some commands need to do this if they themselves
 *	log to the trace buffer (see the EVENT_CMD_FL_POST_TRIGGER
 *	flag below).  @trigger_type values are defined by adding new
 *	values to the trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event.  Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

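/*
 * Illustrative sketch only (not compiled): a hypothetical command
 * built from the generic helpers declared earlier in this header and
 * the my_trigger_ops sketch above.  "my_cmd", my_get_trigger_ops()
 * and the reuse of ETT_EVENT_ENABLE are examples; a real command
 * defines its own ETT_* bit and usually its own parse function.
 */
#if 0
static struct event_trigger_ops *
my_get_trigger_ops(char *cmd, char *param)
{
	/* e.g. select counted vs. uncounted ops based on a ":N" param */
	return &my_trigger_ops;
}

static struct event_command my_cmd = {
	.name			= "my_trigger",
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= my_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* then, at init time: register_event_command(&my_cmd); */
#endif
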
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @POST_TRIGGER flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have either
 *	been invoked or have set their return flag, the current record
 *	is either committed or discarded.  At that point, if any
 *	commands have deferred their triggers, those commands are
 *	finally invoked following the close of the current event.  In
 *	other words, if the event_trigger_ops @func() probe
 *	implementation itself logs to the trace buffer, this flag
 *	should be set, otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

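/*
 * Illustrative sketch only (not compiled): roughly how the trigger
 * dispatch path uses the helper above to implement the two-part
 * POST_TRIGGER protocol documented in enum event_command_flags.  This
 * is a simplified rendering of event_triggers_call() in
 * trace_events_trigger.c, not its exact code.
 */
#if 0
static enum event_trigger_type
my_triggers_call(struct trace_event_file *file, void *rec,
		 struct ring_buffer_event *event)
{
	enum event_trigger_type tt = ETT_NONE;
	struct event_trigger_data *data;
	struct event_filter *filter;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		filter = rcu_dereference_sched(data->filter);
		if (filter && rec && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* defer until the current record is committed */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
#endif
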
extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);

extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

#define MAX_EVENT_NAME_LEN	64

extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(int, char**));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u8 pos);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, and also save the print formats
 * into sections for display.  But the trace infrastructure
 * wants to use these without the added overhead, at the price
 * of being a bit slower (used mainly for warnings, where we
 * don't care about performance).  internal_trace_puts() exists
 * for such a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

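/*
 * Illustrative usage sketch (hypothetical message): emit a literal
 * string into the trace buffer from within the tracing code itself,
 * bypassing the trace_printk() buffer machinery described above.
 */
#if 0
	internal_trace_puts("*** tracing example message ***\n");
#endif
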
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"

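/*
 * Illustrative note: with the FTRACE_ENTRY redefinition above, this
 * re-inclusion of trace_entries.h emits one extern declaration per
 * entry rather than a struct definition.  For example, an entry of
 * the form FTRACE_ENTRY(function, ftrace_entry, TRACE_FN, ...)
 * expands to roughly:
 *
 *	extern struct trace_event_call __aligned(4) event_function;
 *
 * giving every static ftrace event a visible trace_event_call.
 */
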
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t offset = offsetof(struct trace_iterator, seq);

	/*
	 * Keep gcc from complaining about overwriting more than just one
	 * member in the structure.
	 */
	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);

	iter->pos = -1;
}
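
/*
 * trace_iterator_reset() deliberately clears only the members laid
 * out from @seq onward in struct trace_iterator; fields declared
 * before @seq (e.g. the trace_array and tracer pointers) keep their
 * values across the reset.
 */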

#endif /* _LINUX_KERNEL_TRACE_H */