xref: /openbmc/linux/kernel/trace/trace.h (revision ddeea494a16f32522bce16ee65f191d05d4b8282)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5 
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/trace.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/trace_seq.h>
17 #include <linux/trace_events.h>
18 #include <linux/compiler.h>
19 #include <linux/glob.h>
20 #include <linux/irq_work.h>
21 #include <linux/workqueue.h>
22 #include <linux/ctype.h>
23 #include <linux/once_lite.h>
24 
25 #include "pid_list.h"
26 
27 #ifdef CONFIG_FTRACE_SYSCALLS
28 #include <asm/unistd.h>		/* For NR_syscalls	     */
29 #include <asm/syscall.h>	/* some archs define it here */
30 #endif
31 
32 #define TRACE_MODE_WRITE	0640
33 #define TRACE_MODE_READ		0440
34 
35 enum trace_type {
36 	__TRACE_FIRST_TYPE = 0,
37 
38 	TRACE_FN,
39 	TRACE_CTX,
40 	TRACE_WAKE,
41 	TRACE_STACK,
42 	TRACE_PRINT,
43 	TRACE_BPRINT,
44 	TRACE_MMIO_RW,
45 	TRACE_MMIO_MAP,
46 	TRACE_BRANCH,
47 	TRACE_GRAPH_RET,
48 	TRACE_GRAPH_ENT,
49 	TRACE_USER_STACK,
50 	TRACE_BLK,
51 	TRACE_BPUTS,
52 	TRACE_HWLAT,
53 	TRACE_OSNOISE,
54 	TRACE_TIMERLAT,
55 	TRACE_RAW_DATA,
56 	TRACE_FUNC_REPEATS,
57 
58 	__TRACE_LAST_TYPE,
59 };
60 
61 
62 #undef __field
63 #define __field(type, item)		type	item;
64 
65 #undef __field_fn
66 #define __field_fn(type, item)		type	item;
67 
68 #undef __field_struct
69 #define __field_struct(type, item)	__field(type, item)
70 
71 #undef __field_desc
72 #define __field_desc(type, container, item)
73 
74 #undef __field_packed
75 #define __field_packed(type, container, item)
76 
77 #undef __array
78 #define __array(type, item, size)	type	item[size];
79 
80 #undef __array_desc
81 #define __array_desc(type, container, item, size)
82 
83 #undef __dynamic_array
84 #define __dynamic_array(type, item)	type	item[];
85 
86 #undef __rel_dynamic_array
87 #define __rel_dynamic_array(type, item)	type	item[];
88 
89 #undef F_STRUCT
90 #define F_STRUCT(args...)		args
91 
92 #undef FTRACE_ENTRY
93 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
94 	struct struct_name {						\
95 		struct trace_entry	ent;				\
96 		tstruct							\
97 	}
98 
99 #undef FTRACE_ENTRY_DUP
100 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
101 
102 #undef FTRACE_ENTRY_REG
103 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	regfn)	\
104 	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
105 
106 #undef FTRACE_ENTRY_PACKED
107 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
108 	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
109 
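/*
 * Illustrative sketch (not verbatim from trace_entries.h): with the
 * definitions above, an invocation such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * expands to:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */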
110 #include "trace_entries.h"
111 
112 /* Use this for memory failure errors */
113 #define MEM_FAIL(condition, fmt, ...)					\
114 	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
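/*
 * Minimal usage sketch (illustrative): DO_ONCE_LITE_IF() evaluates to
 * its condition, so MEM_FAIL() can gate the error path directly:
 *
 *	buf = kzalloc(size, GFP_KERNEL);
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */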
115 
116 #define FAULT_STRING "(fault)"
117 
118 #define HIST_STACKTRACE_DEPTH	16
119 #define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
120 #define HIST_STACKTRACE_SKIP	5
121 
122 /*
123  * Syscalls are special and need special handling; this is why
124  * they are not included in trace_entries.h.
125  */
126 struct syscall_trace_enter {
127 	struct trace_entry	ent;
128 	int			nr;
129 	unsigned long		args[];
130 };
131 
132 struct syscall_trace_exit {
133 	struct trace_entry	ent;
134 	int			nr;
135 	long			ret;
136 };
137 
138 struct kprobe_trace_entry_head {
139 	struct trace_entry	ent;
140 	unsigned long		ip;
141 };
142 
143 struct eprobe_trace_entry_head {
144 	struct trace_entry	ent;
145 };
146 
147 struct kretprobe_trace_entry_head {
148 	struct trace_entry	ent;
149 	unsigned long		func;
150 	unsigned long		ret_ip;
151 };
152 
153 struct fentry_trace_entry_head {
154 	struct trace_entry	ent;
155 	unsigned long		ip;
156 };
157 
158 struct fexit_trace_entry_head {
159 	struct trace_entry	ent;
160 	unsigned long		func;
161 	unsigned long		ret_ip;
162 };
163 
164 #define TRACE_BUF_SIZE		1024
165 
166 struct trace_array;
167 
168 /*
169  * The CPU trace array - it consists of thousands of trace entries
170  * plus some other descriptor data (for example, which task started
171  * the trace).
172  */
173 struct trace_array_cpu {
174 	atomic_t		disabled;
175 	void			*buffer_page;	/* ring buffer spare */
176 
177 	unsigned long		entries;
178 	unsigned long		saved_latency;
179 	unsigned long		critical_start;
180 	unsigned long		critical_end;
181 	unsigned long		critical_sequence;
182 	unsigned long		nice;
183 	unsigned long		policy;
184 	unsigned long		rt_priority;
185 	unsigned long		skipped_entries;
186 	u64			preempt_timestamp;
187 	pid_t			pid;
188 	kuid_t			uid;
189 	char			comm[TASK_COMM_LEN];
190 
191 #ifdef CONFIG_FUNCTION_TRACER
192 	int			ftrace_ignore_pid;
193 #endif
194 	bool			ignore_pid;
195 };
196 
197 struct tracer;
198 struct trace_option_dentry;
199 
200 struct array_buffer {
201 	struct trace_array		*tr;
202 	struct trace_buffer		*buffer;
203 	struct trace_array_cpu __percpu	*data;
204 	u64				time_start;
205 	int				cpu;
206 };
207 
208 #define TRACE_FLAGS_MAX_SIZE		32
209 
210 struct trace_options {
211 	struct tracer			*tracer;
212 	struct trace_option_dentry	*topts;
213 };
214 
215 struct trace_pid_list *trace_pid_list_alloc(void);
216 void trace_pid_list_free(struct trace_pid_list *pid_list);
217 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
218 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
219 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
220 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
221 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
222 			unsigned int *next);
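/*
 * Iteration sketch (illustrative): walk every pid set in a pid list
 * with the first/next API above; both return a negative value when
 * no (further) pid is found.
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); ret >= 0;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		pr_info("pid %u is being traced\n", pid);
 */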
223 
224 enum {
225 	TRACE_PIDS		= BIT(0),
226 	TRACE_NO_PIDS		= BIT(1),
227 };
228 
229 static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
230 				    struct trace_pid_list *no_pid_list)
231 {
232 	/* Return true if the pid list in type has pids */
233 	return ((type & TRACE_PIDS) && pid_list) ||
234 		((type & TRACE_NO_PIDS) && no_pid_list);
235 }
236 
237 static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
238 					 struct trace_pid_list *no_pid_list)
239 {
240 	/*
241 	 * When turning off the lists in @type, return true if the
242 	 * "other" pid list still has pids in it.
243 	 */
244 	return (!(type & TRACE_PIDS) && pid_list) ||
245 		(!(type & TRACE_NO_PIDS) && no_pid_list);
246 }
247 
248 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
249 
250 /**
251  * struct cond_snapshot - conditional snapshot data and callback
252  *
253  * The cond_snapshot structure encapsulates a callback function and
254  * data associated with the snapshot for a given tracing instance.
255  *
256  * When a snapshot is taken conditionally, by invoking
257  * tracing_snapshot_cond(tr, cond_data), the cond_data is passed
258  * in turn to the cond_snapshot.update() function.  That data
259  * can be compared by the update() implementation with the cond_data
260  * contained within the struct cond_snapshot instance associated with
261  * the trace_array.  Because the tr->max_lock is held throughout the
262  * update() call, the update() function can directly retrieve the
263  * cond_snapshot and cond_data associated with the per-instance
264  * snapshot associated with the trace_array.
265  *
266  * The cond_snapshot.update() implementation can save data to be
267  * associated with the snapshot if it decides to, and returns 'true'
268  * in that case, or it returns 'false' if the conditional snapshot
269  * shouldn't be taken.
270  *
271  * The cond_snapshot instance is created and associated with the
272  * user-defined cond_data by tracing_cond_snapshot_enable().
273  * Likewise, the cond_snapshot instance is destroyed and is no longer
274  * associated with the trace instance by
275  * tracing_cond_snapshot_disable().
276  *
277  * The method below is required.
278  *
279  * @update: When a conditional snapshot is invoked, the update()
280  *	callback function is invoked with the tr->max_lock held.  The
281  *	update() implementation signals whether or not to actually
282  *	take the snapshot, by returning 'true' if so, 'false' if no
283  *	snapshot should be taken.  Because the max_lock is held for
284  *	the duration of update(), the implementation can safely
285  *	retrieve and save any implementation data it needs to
286  *	associate with the snapshot.
287  */
288 struct cond_snapshot {
289 	void				*cond_data;
290 	cond_update_fn_t		update;
291 };
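/*
 * Sketch of an update() implementation (hypothetical names): take the
 * snapshot only when the value observed at trigger time exceeds a
 * threshold registered as the user cond_data.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = cond_data;
 *
 *		// my_observed_value() stands in for whatever state
 *		// the caller wants to compare against.
 *		return my_observed_value() > *threshold;
 *	}
 */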
292 
293 /*
294  * struct trace_func_repeats - used to keep track of consecutive calls
295  * (on the same CPU) of a single function.
296  */
297 struct trace_func_repeats {
298 	unsigned long	ip;
299 	unsigned long	parent_ip;
300 	unsigned long	count;
301 	u64		ts_last_call;
302 };
303 
304 /*
305  * The trace array - an array of per-CPU trace arrays. This is the
306  * highest level data structure that individual tracers deal with.
307  * They have on/off state as well.
308  */
309 struct trace_array {
310 	struct list_head	list;
311 	char			*name;
312 	struct array_buffer	array_buffer;
313 #ifdef CONFIG_TRACER_MAX_TRACE
314 	/*
315 	 * The max_buffer is used to snapshot the trace when a maximum
316 	 * latency is reached, or when the user initiates a snapshot.
317 	 * Some tracers will use this to store a maximum trace while
318 	 * it continues examining live traces.
319 	 *
320 	 * The buffers for the max_buffer are set up the same as the array_buffer.
321 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
322 	 * with the buffer of the array_buffer, and the buffers are reset for
323 	 * the array_buffer so that tracing can continue.
324 	 */
325 	struct array_buffer	max_buffer;
326 	bool			allocated_snapshot;
327 #endif
328 #ifdef CONFIG_TRACER_MAX_TRACE
329 	unsigned long		max_latency;
330 #ifdef CONFIG_FSNOTIFY
331 	struct dentry		*d_max_latency;
332 	struct work_struct	fsnotify_work;
333 	struct irq_work		fsnotify_irqwork;
334 #endif
335 #endif
336 	struct trace_pid_list	__rcu *filtered_pids;
337 	struct trace_pid_list	__rcu *filtered_no_pids;
338 	/*
339 	 * max_lock is used to protect the swapping of buffers
340 	 * when taking a max snapshot. The buffers themselves are
341 	 * protected by per_cpu spinlocks. But the action of the swap
342 	 * needs its own lock.
343 	 *
344 	 * This is defined as an arch_spinlock_t in order to help
345 	 * with performance when lockdep debugging is enabled.
346 	 *
347 	 * It is also used in other places outside of update_max_tr(),
348 	 * so it needs to be defined outside of the
349 	 * CONFIG_TRACER_MAX_TRACE block.
350 	 */
351 	arch_spinlock_t		max_lock;
352 	int			buffer_disabled;
353 #ifdef CONFIG_FTRACE_SYSCALLS
354 	int			sys_refcount_enter;
355 	int			sys_refcount_exit;
356 	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
357 	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
358 #endif
359 	int			stop_count;
360 	int			clock_id;
361 	int			nr_topts;
362 	bool			clear_trace;
363 	int			buffer_percent;
364 	unsigned int		n_err_log_entries;
365 	struct tracer		*current_trace;
366 	unsigned int		trace_flags;
367 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
368 	unsigned int		flags;
369 	raw_spinlock_t		start_lock;
370 	struct list_head	err_log;
371 	struct dentry		*dir;
372 	struct dentry		*options;
373 	struct dentry		*percpu_dir;
374 	struct dentry		*event_dir;
375 	struct trace_options	*topts;
376 	struct list_head	systems;
377 	struct list_head	events;
378 	struct trace_event_file *trace_marker_file;
379 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
380 	int			ref;
381 	int			trace_ref;
382 #ifdef CONFIG_FUNCTION_TRACER
383 	struct ftrace_ops	*ops;
384 	struct trace_pid_list	__rcu *function_pids;
385 	struct trace_pid_list	__rcu *function_no_pids;
386 #ifdef CONFIG_DYNAMIC_FTRACE
387 	/* All of these are protected by the ftrace_lock */
388 	struct list_head	func_probes;
389 	struct list_head	mod_trace;
390 	struct list_head	mod_notrace;
391 #endif
392 	/* function tracing enabled */
393 	int			function_enabled;
394 #endif
395 	int			no_filter_buffering_ref;
396 	struct list_head	hist_vars;
397 #ifdef CONFIG_TRACER_SNAPSHOT
398 	struct cond_snapshot	*cond_snapshot;
399 #endif
400 	struct trace_func_repeats	__percpu *last_func_repeats;
401 };
402 
403 enum {
404 	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
405 };
406 
407 extern struct list_head ftrace_trace_arrays;
408 
409 extern struct mutex trace_types_lock;
410 
411 extern int trace_array_get(struct trace_array *tr);
412 extern int tracing_check_open_get_tr(struct trace_array *tr);
413 extern struct trace_array *trace_array_find(const char *instance);
414 extern struct trace_array *trace_array_find_get(const char *instance);
415 
416 extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
417 extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
418 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
419 
420 extern bool trace_clock_in_ns(struct trace_array *tr);
421 
422 /*
423  * The global tracer (top) should be the first trace array added,
424  * but we check the flag anyway.
425  */
426 static inline struct trace_array *top_trace_array(void)
427 {
428 	struct trace_array *tr;
429 
430 	if (list_empty(&ftrace_trace_arrays))
431 		return NULL;
432 
433 	tr = list_entry(ftrace_trace_arrays.prev,
434 			typeof(*tr), list);
435 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
436 	return tr;
437 }
438 
439 #define FTRACE_CMP_TYPE(var, type) \
440 	__builtin_types_compatible_p(typeof(var), type *)
441 
442 #undef IF_ASSIGN
443 #define IF_ASSIGN(var, entry, etype, id)			\
444 	if (FTRACE_CMP_TYPE(var, etype)) {			\
445 		var = (typeof(var))(entry);			\
446 		WARN_ON(id != 0 && (entry)->type != id);	\
447 		break;						\
448 	}
449 
450 /* Will cause compile errors if type is not found. */
451 extern void __ftrace_bad_type(void);
452 
453 /*
454  * The trace_assign_type is a verifier that the entry type is
455  * the same as the type being assigned. To add new types simply
456  * add a line with the following format:
457  *
458  * IF_ASSIGN(var, ent, type, id);
459  *
460  *  Where "type" is the trace type that includes the trace_entry
461  *  as the "ent" item. And "id" is the trace identifier that is
462  *  used in the trace_type enum.
463  *
464  *  If the type can have more than one id, then use zero.
465  */
466 #define trace_assign_type(var, ent)					\
467 	do {								\
468 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
469 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
470 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
471 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
472 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
473 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
474 		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
475 		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
476 		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
477 		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
478 		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
479 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
480 			  TRACE_MMIO_RW);				\
481 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
482 			  TRACE_MMIO_MAP);				\
483 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
484 		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
485 			  TRACE_GRAPH_ENT);		\
486 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
487 			  TRACE_GRAPH_RET);		\
488 		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
489 			  TRACE_FUNC_REPEATS);				\
490 		__ftrace_bad_type();					\
491 	} while (0)
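/*
 * Usage sketch (illustrative): an output routine recovers the full
 * event from the generic trace_entry once the type matches.
 *
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 */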
492 
493 /*
494  * An option specific to a tracer. This is a boolean value.
495  * The bit is the bit mask that sets its value on the
496  * flags value in struct tracer_flags.
497  */
498 struct tracer_opt {
499 	const char	*name; /* Will appear on the trace_options file */
500 	u32		bit; /* Mask assigned in val field in tracer_flags */
501 };
502 
503 /*
504  * The set of specific options for a tracer. Your tracer
505  * has to set the initial value of the flags val.
506  */
507 struct tracer_flags {
508 	u32			val;
509 	struct tracer_opt	*opts;
510 	struct tracer		*trace;
511 };
512 
513 /* Makes it easier to define a tracer opt */
514 #define TRACER_OPT(s, b)	.name = #s, .bit = b
515 
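/*
 * Sketch (hypothetical names): private options are defined with
 * TRACER_OPT and collected into a tracer_flags instance, whose val
 * holds the initial bits.
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, 0x1) },
 *		{ }	// must be null terminated
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */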
516 
517 struct trace_option_dentry {
518 	struct tracer_opt		*opt;
519 	struct tracer_flags		*flags;
520 	struct trace_array		*tr;
521 	struct dentry			*entry;
522 };
523 
524 /**
525  * struct tracer - a specific tracer and its callbacks to interact with tracefs
526  * @name: the name chosen to select it on the available_tracers file
527  * @init: called when one switches to this tracer (echo name > current_tracer)
528  * @reset: called when one switches to another tracer
529  * @start: called when tracing is unpaused (echo 1 > tracing_on)
530  * @stop: called when tracing is paused (echo 0 > tracing_on)
531  * @update_thresh: called when tracing_thresh is updated
532  * @open: called when the trace file is opened
533  * @pipe_open: called when the trace_pipe file is opened
534  * @close: called when the trace file is released
535  * @pipe_close: called when the trace_pipe file is released
536  * @read: override the default read callback on trace_pipe
537  * @splice_read: override the default splice_read callback on trace_pipe
538  * @selftest: selftest to run on boot (see trace_selftest.c)
539  * @print_header: override the first lines that describe your columns
540  * @print_line: callback that prints a trace
541  * @set_flag: signals one of your private flags changed (trace_options file)
542  * @flags: your private flags
543  */
544 struct tracer {
545 	const char		*name;
546 	int			(*init)(struct trace_array *tr);
547 	void			(*reset)(struct trace_array *tr);
548 	void			(*start)(struct trace_array *tr);
549 	void			(*stop)(struct trace_array *tr);
550 	int			(*update_thresh)(struct trace_array *tr);
551 	void			(*open)(struct trace_iterator *iter);
552 	void			(*pipe_open)(struct trace_iterator *iter);
553 	void			(*close)(struct trace_iterator *iter);
554 	void			(*pipe_close)(struct trace_iterator *iter);
555 	ssize_t			(*read)(struct trace_iterator *iter,
556 					struct file *filp, char __user *ubuf,
557 					size_t cnt, loff_t *ppos);
558 	ssize_t			(*splice_read)(struct trace_iterator *iter,
559 					       struct file *filp,
560 					       loff_t *ppos,
561 					       struct pipe_inode_info *pipe,
562 					       size_t len,
563 					       unsigned int flags);
564 #ifdef CONFIG_FTRACE_STARTUP_TEST
565 	int			(*selftest)(struct tracer *trace,
566 					    struct trace_array *tr);
567 #endif
568 	void			(*print_header)(struct seq_file *m);
569 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
570 	/* If you handled the flag setting, return 0 */
571 	int			(*set_flag)(struct trace_array *tr,
572 					    u32 old_flags, u32 bit, int set);
573 	/* Return 0 if OK with change, else return non-zero */
574 	int			(*flag_changed)(struct trace_array *tr,
575 						u32 mask, int set);
576 	struct tracer		*next;
577 	struct tracer_flags	*flags;
578 	int			enabled;
579 	bool			print_max;
580 	bool			allow_instances;
581 #ifdef CONFIG_TRACER_MAX_TRACE
582 	bool			use_max_tr;
583 #endif
584 	/* True if tracer cannot be enabled in kernel param */
585 	bool			noboot;
586 };
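/*
 * Minimal tracer sketch (hypothetical names): only @name and @init
 * are needed for the tracer to show up in available_tracers; all the
 * other callbacks are optional.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *	};
 *
 *	// registered from an __init function:
 *	//	register_tracer(&my_tracer);
 */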
587 
588 static inline struct ring_buffer_iter *
589 trace_buffer_iter(struct trace_iterator *iter, int cpu)
590 {
591 	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
592 }
593 
594 int tracer_init(struct tracer *t, struct trace_array *tr);
595 int tracing_is_enabled(void);
596 void tracing_reset_online_cpus(struct array_buffer *buf);
597 void tracing_reset_current(int cpu);
598 void tracing_reset_all_online_cpus(void);
599 void tracing_reset_all_online_cpus_unlocked(void);
600 int tracing_open_generic(struct inode *inode, struct file *filp);
601 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
602 bool tracing_is_disabled(void);
603 bool tracer_tracing_is_on(struct trace_array *tr);
604 void tracer_tracing_on(struct trace_array *tr);
605 void tracer_tracing_off(struct trace_array *tr);
606 struct dentry *trace_create_file(const char *name,
607 				 umode_t mode,
608 				 struct dentry *parent,
609 				 void *data,
610 				 const struct file_operations *fops);
611 
612 int tracing_init_dentry(void);
613 
614 struct ring_buffer_event;
615 
616 struct ring_buffer_event *
617 trace_buffer_lock_reserve(struct trace_buffer *buffer,
618 			  int type,
619 			  unsigned long len,
620 			  unsigned int trace_ctx);
621 
622 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
623 						struct trace_array_cpu *data);
624 
625 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
626 					  int *ent_cpu, u64 *ent_ts);
627 
628 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
629 					struct ring_buffer_event *event);
630 
631 bool trace_is_tracepoint_string(const char *str);
632 const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
633 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
634 			 va_list ap) __printf(2, 0);
635 char *trace_iter_expand_format(struct trace_iterator *iter);
636 
637 int trace_empty(struct trace_iterator *iter);
638 
639 void *trace_find_next_entry_inc(struct trace_iterator *iter);
640 
641 void trace_init_global_iter(struct trace_iterator *iter);
642 
643 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
644 
645 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
646 unsigned long trace_total_entries(struct trace_array *tr);
647 
648 void trace_function(struct trace_array *tr,
649 		    unsigned long ip,
650 		    unsigned long parent_ip,
651 		    unsigned int trace_ctx);
652 void trace_graph_function(struct trace_array *tr,
653 		    unsigned long ip,
654 		    unsigned long parent_ip,
655 		    unsigned int trace_ctx);
656 void trace_latency_header(struct seq_file *m);
657 void trace_default_header(struct seq_file *m);
658 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
659 
660 void trace_graph_return(struct ftrace_graph_ret *trace);
661 int trace_graph_entry(struct ftrace_graph_ent *trace);
662 void set_graph_array(struct trace_array *tr);
663 
664 void tracing_start_cmdline_record(void);
665 void tracing_stop_cmdline_record(void);
666 void tracing_start_tgid_record(void);
667 void tracing_stop_tgid_record(void);
668 
669 int register_tracer(struct tracer *type);
670 int is_tracing_stopped(void);
671 
672 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
673 
674 extern cpumask_var_t __read_mostly tracing_buffer_mask;
675 
676 #define for_each_tracing_cpu(cpu)	\
677 	for_each_cpu(cpu, tracing_buffer_mask)
678 
679 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
680 
681 extern unsigned long tracing_thresh;
682 
683 /* PID filtering */
684 
685 extern int pid_max;
686 
687 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
688 			     pid_t search_pid);
689 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
690 			    struct trace_pid_list *filtered_no_pids,
691 			    struct task_struct *task);
692 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
693 				  struct task_struct *self,
694 				  struct task_struct *task);
695 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
696 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
697 int trace_pid_show(struct seq_file *m, void *v);
698 void trace_free_pid_list(struct trace_pid_list *pid_list);
699 int trace_pid_write(struct trace_pid_list *filtered_pids,
700 		    struct trace_pid_list **new_pid_list,
701 		    const char __user *ubuf, size_t cnt);
702 
703 #ifdef CONFIG_TRACER_MAX_TRACE
704 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
705 		   void *cond_data);
706 void update_max_tr_single(struct trace_array *tr,
707 			  struct task_struct *tsk, int cpu);
708 
709 #ifdef CONFIG_FSNOTIFY
710 #define LATENCY_FS_NOTIFY
711 #endif
712 #endif /* CONFIG_TRACER_MAX_TRACE */
713 
714 #ifdef LATENCY_FS_NOTIFY
715 void latency_fsnotify(struct trace_array *tr);
716 #else
717 static inline void latency_fsnotify(struct trace_array *tr) { }
718 #endif
719 
720 #ifdef CONFIG_STACKTRACE
721 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
722 #else
723 static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
724 				 int skip)
725 {
726 }
727 #endif /* CONFIG_STACKTRACE */
728 
729 void trace_last_func_repeats(struct trace_array *tr,
730 			     struct trace_func_repeats *last_info,
731 			     unsigned int trace_ctx);
732 
733 extern u64 ftrace_now(int cpu);
734 
735 extern void trace_find_cmdline(int pid, char comm[]);
736 extern int trace_find_tgid(int pid);
737 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
738 
739 #ifdef CONFIG_DYNAMIC_FTRACE
740 extern unsigned long ftrace_update_tot_cnt;
741 extern unsigned long ftrace_number_of_pages;
742 extern unsigned long ftrace_number_of_groups;
743 void ftrace_init_trace_array(struct trace_array *tr);
744 #else
745 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
746 #endif
747 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
748 extern int DYN_FTRACE_TEST_NAME(void);
749 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
750 extern int DYN_FTRACE_TEST_NAME2(void);
751 
752 extern bool ring_buffer_expanded;
753 extern bool tracing_selftest_disabled;
754 
755 #ifdef CONFIG_FTRACE_STARTUP_TEST
756 extern void __init disable_tracing_selftest(const char *reason);
757 
758 extern int trace_selftest_startup_function(struct tracer *trace,
759 					   struct trace_array *tr);
760 extern int trace_selftest_startup_function_graph(struct tracer *trace,
761 						 struct trace_array *tr);
762 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
763 					  struct trace_array *tr);
764 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
765 					     struct trace_array *tr);
766 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
767 						 struct trace_array *tr);
768 extern int trace_selftest_startup_wakeup(struct tracer *trace,
769 					 struct trace_array *tr);
770 extern int trace_selftest_startup_nop(struct tracer *trace,
771 					 struct trace_array *tr);
772 extern int trace_selftest_startup_branch(struct tracer *trace,
773 					 struct trace_array *tr);
774 /*
775  * Tracer data references selftest functions that only occur
776  * on boot up. These can be __init functions. Thus, when selftests
777  * are enabled, the tracers need to reference __init functions.
778  */
779 #define __tracer_data		__refdata
780 #else
781 static inline void __init disable_tracing_selftest(const char *reason)
782 {
783 }
784 /* Tracers are seldom changed. Optimize when selftests are disabled. */
785 #define __tracer_data		__read_mostly
786 #endif /* CONFIG_FTRACE_STARTUP_TEST */
787 
788 extern void *head_page(struct trace_array_cpu *data);
789 extern unsigned long long ns2usecs(u64 nsec);
790 extern int
791 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
792 extern int
793 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
794 extern int
795 trace_array_vprintk(struct trace_array *tr,
796 		    unsigned long ip, const char *fmt, va_list args);
797 int trace_array_printk_buf(struct trace_buffer *buffer,
798 			   unsigned long ip, const char *fmt, ...);
799 void trace_printk_seq(struct trace_seq *s);
800 enum print_line_t print_trace_line(struct trace_iterator *iter);
801 
802 extern char trace_find_mark(unsigned long long duration);
803 
804 struct ftrace_hash;
805 
806 struct ftrace_mod_load {
807 	struct list_head	list;
808 	char			*func;
809 	char			*module;
810 	int			 enable;
811 };
812 
813 enum {
814 	FTRACE_HASH_FL_MOD	= (1 << 0),
815 };
816 
817 struct ftrace_hash {
818 	unsigned long		size_bits;
819 	struct hlist_head	*buckets;
820 	unsigned long		count;
821 	unsigned long		flags;
822 	struct rcu_head		rcu;
823 };
824 
825 struct ftrace_func_entry *
826 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
827 
828 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
829 {
830 	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
831 }
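/*
 * Illustrative check: an empty hash means "match everything", so
 * callers (e.g. ftrace_graph_addr() below) typically treat
 * empty-or-found as enabled:
 *
 *	if (ftrace_hash_empty(hash) || ftrace_lookup_ip(hash, ip))
 *		// ip passes this filter hash
 */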
832 
833 /* Standard output formatting function used for function return traces */
834 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
835 
836 /* Flag options */
837 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
838 #define TRACE_GRAPH_PRINT_CPU           0x2
839 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
840 #define TRACE_GRAPH_PRINT_PROC          0x8
841 #define TRACE_GRAPH_PRINT_DURATION      0x10
842 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
843 #define TRACE_GRAPH_PRINT_REL_TIME      0x40
844 #define TRACE_GRAPH_PRINT_IRQS          0x80
845 #define TRACE_GRAPH_PRINT_TAIL          0x100
846 #define TRACE_GRAPH_SLEEP_TIME          0x200
847 #define TRACE_GRAPH_GRAPH_TIME          0x400
848 #define TRACE_GRAPH_PRINT_RETVAL        0x800
849 #define TRACE_GRAPH_PRINT_RETVAL_HEX    0x1000
850 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
851 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
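/*
 * The flags above are OR-ed into the u32 passed to
 * print_graph_function_flags() and friends, e.g. (illustrative):
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION;
 */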
852 
853 extern void ftrace_graph_sleep_time_control(bool enable);
854 
855 #ifdef CONFIG_FUNCTION_PROFILER
856 extern void ftrace_graph_graph_time_control(bool enable);
857 #else
858 static inline void ftrace_graph_graph_time_control(bool enable) { }
859 #endif
860 
861 extern enum print_line_t
862 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
863 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
864 extern void
865 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
866 extern void graph_trace_open(struct trace_iterator *iter);
867 extern void graph_trace_close(struct trace_iterator *iter);
868 extern int __trace_graph_entry(struct trace_array *tr,
869 			       struct ftrace_graph_ent *trace,
870 			       unsigned int trace_ctx);
871 extern void __trace_graph_return(struct trace_array *tr,
872 				 struct ftrace_graph_ret *trace,
873 				 unsigned int trace_ctx);
874 
875 #ifdef CONFIG_DYNAMIC_FTRACE
876 extern struct ftrace_hash __rcu *ftrace_graph_hash;
877 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
878 
879 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
880 {
881 	unsigned long addr = trace->func;
882 	int ret = 0;
883 	struct ftrace_hash *hash;
884 
885 	preempt_disable_notrace();
886 
887 	/*
888 	 * Have to open code "rcu_dereference_sched()" because the
889 	 * function graph tracer can be called when RCU is not
890 	 * "watching".
891 	 * Protected with schedule_on_each_cpu(ftrace_sync)
892 	 */
893 	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
894 
895 	if (ftrace_hash_empty(hash)) {
896 		ret = 1;
897 		goto out;
898 	}
899 
900 	if (ftrace_lookup_ip(hash, addr)) {
901 
902 		/*
903 		 * This needs to be cleared on the return functions
904 		 * when the depth is zero.
905 		 */
906 		trace_recursion_set(TRACE_GRAPH_BIT);
907 		trace_recursion_set_depth(trace->depth);
908 
909 		/*
910 		 * If no irqs are to be traced, but a set_graph_function
911 		 * is set, and called by an interrupt handler, we still
912 		 * want to trace it.
913 		 */
914 		if (in_hardirq())
915 			trace_recursion_set(TRACE_IRQ_BIT);
916 		else
917 			trace_recursion_clear(TRACE_IRQ_BIT);
918 		ret = 1;
919 	}
920 
921 out:
922 	preempt_enable_notrace();
923 	return ret;
924 }
925 
926 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
927 {
928 	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
929 	    trace->depth == trace_recursion_depth())
930 		trace_recursion_clear(TRACE_GRAPH_BIT);
931 }
932 
933 static inline int ftrace_graph_notrace_addr(unsigned long addr)
934 {
935 	int ret = 0;
936 	struct ftrace_hash *notrace_hash;
937 
938 	preempt_disable_notrace();
939 
940 	/*
941 	 * Have to open code "rcu_dereference_sched()" because the
942 	 * function graph tracer can be called when RCU is not
943 	 * "watching".
944 	 * Protected with schedule_on_each_cpu(ftrace_sync)
945 	 */
946 	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
947 						 !preemptible());
948 
949 	if (ftrace_lookup_ip(notrace_hash, addr))
950 		ret = 1;
951 
952 	preempt_enable_notrace();
953 	return ret;
954 }
955 #else
956 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
957 {
958 	return 1;
959 }
960 
961 static inline int ftrace_graph_notrace_addr(unsigned long addr)
962 {
963 	return 0;
964 }
965 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
966 { }
967 #endif /* CONFIG_DYNAMIC_FTRACE */
968 
969 extern unsigned int fgraph_max_depth;
970 
971 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
972 {
973 	/* Trace it when it is nested in, or is, an enabled function. */
974 	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
975 		 ftrace_graph_addr(trace)) ||
976 		(trace->depth < 0) ||
977 		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
978 }
979 
980 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
981 static inline enum print_line_t
982 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
983 {
984 	return TRACE_TYPE_UNHANDLED;
985 }
986 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
987 
988 extern struct list_head ftrace_pids;
989 
990 #ifdef CONFIG_FUNCTION_TRACER
991 
992 #define FTRACE_PID_IGNORE	-1
993 #define FTRACE_PID_TRACE	-2
994 
995 struct ftrace_func_command {
996 	struct list_head	list;
997 	char			*name;
998 	int			(*func)(struct trace_array *tr,
999 					struct ftrace_hash *hash,
1000 					char *func, char *cmd,
1001 					char *params, int enable);
1002 };
1003 extern bool ftrace_filter_param __initdata;
1004 static inline int ftrace_trace_task(struct trace_array *tr)
1005 {
1006 	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
1007 		FTRACE_PID_IGNORE;
1008 }
1009 extern int ftrace_is_dead(void);
1010 int ftrace_create_function_files(struct trace_array *tr,
1011 				 struct dentry *parent);
1012 void ftrace_destroy_function_files(struct trace_array *tr);
1013 int ftrace_allocate_ftrace_ops(struct trace_array *tr);
1014 void ftrace_free_ftrace_ops(struct trace_array *tr);
1015 void ftrace_init_global_array_ops(struct trace_array *tr);
1016 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1017 void ftrace_reset_array_ops(struct trace_array *tr);
1018 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1019 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1020 				  struct dentry *d_tracer);
1021 void ftrace_clear_pids(struct trace_array *tr);
1022 int init_function_trace(void);
1023 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1024 #else
1025 static inline int ftrace_trace_task(struct trace_array *tr)
1026 {
1027 	return 1;
1028 }
1029 static inline int ftrace_is_dead(void) { return 0; }
1030 static inline int
1031 ftrace_create_function_files(struct trace_array *tr,
1032 			     struct dentry *parent)
1033 {
1034 	return 0;
1035 }
1036 static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
1037 {
1038 	return 0;
1039 }
1040 static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
1041 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1042 static inline __init void
1043 ftrace_init_global_array_ops(struct trace_array *tr) { }
1044 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1045 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1046 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1047 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1048 static inline int init_function_trace(void) { return 0; }
1049 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1050 /* ftrace_func_t type is not defined, use macro instead of static inline */
1051 #define ftrace_init_array_ops(tr, func) do { } while (0)
1052 #endif /* CONFIG_FUNCTION_TRACER */
1053 
1054 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1055 
1056 struct ftrace_probe_ops {
1057 	void			(*func)(unsigned long ip,
1058 					unsigned long parent_ip,
1059 					struct trace_array *tr,
1060 					struct ftrace_probe_ops *ops,
1061 					void *data);
1062 	int			(*init)(struct ftrace_probe_ops *ops,
1063 					struct trace_array *tr,
1064 					unsigned long ip, void *init_data,
1065 					void **data);
1066 	void			(*free)(struct ftrace_probe_ops *ops,
1067 					struct trace_array *tr,
1068 					unsigned long ip, void *data);
1069 	int			(*print)(struct seq_file *m,
1070 					 unsigned long ip,
1071 					 struct ftrace_probe_ops *ops,
1072 					 void *data);
1073 };
1074 
1075 struct ftrace_func_mapper;
1076 typedef int (*ftrace_mapper_func)(void *data);
1077 
1078 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1079 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1080 					   unsigned long ip);
1081 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1082 			       unsigned long ip, void *data);
1083 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1084 				   unsigned long ip);
1085 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1086 			     ftrace_mapper_func free_func);
1087 
1088 extern int
1089 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1090 			       struct ftrace_probe_ops *ops, void *data);
1091 extern int
1092 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1093 				      struct ftrace_probe_ops *ops);
1094 extern void clear_ftrace_function_probes(struct trace_array *tr);
1095 
1096 int register_ftrace_command(struct ftrace_func_command *cmd);
1097 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1098 
1099 void ftrace_create_filter_files(struct ftrace_ops *ops,
1100 				struct dentry *parent);
1101 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1102 
1103 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1104 			     int len, int reset);
1105 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1106 			      int len, int reset);
1107 #else
1108 struct ftrace_func_command;
1109 
1110 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1111 {
1112 	return -EINVAL;
1113 }
1114 static inline __init int unregister_ftrace_command(char *cmd_name)
1115 {
1116 	return -EINVAL;
1117 }
1118 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1119 {
1120 }
1121 
1122 /*
1123  * The ops parameter passed in is usually undefined.
1124  * This must be a macro.
1125  */
1126 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1127 #define ftrace_destroy_filter_files(ops) do { } while (0)
1128 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1129 
1130 bool ftrace_event_is_function(struct trace_event_call *call);
1131 
1132 /*
1133  * struct trace_parser - serves for reading the user input separated by spaces
1134  * @cont: set if the input is not complete - no final space char was found
1135  * @buffer: holds the parsed user input
1136  * @idx: user input length
1137  * @size: buffer size
1138  */
1139 struct trace_parser {
1140 	bool		cont;
1141 	char		*buffer;
1142 	unsigned	idx;
1143 	unsigned	size;
1144 };
1145 
1146 static inline bool trace_parser_loaded(struct trace_parser *parser)
1147 {
1148 	return (parser->idx != 0);
1149 }
1150 
1151 static inline bool trace_parser_cont(struct trace_parser *parser)
1152 {
1153 	return parser->cont;
1154 }
1155 
1156 static inline void trace_parser_clear(struct trace_parser *parser)
1157 {
1158 	parser->cont = false;
1159 	parser->idx = 0;
1160 }
1161 
1162 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1163 extern void trace_parser_put(struct trace_parser *parser);
1164 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1165 	size_t cnt, loff_t *ppos);
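/*
 * Sketch of the usual write() handler pattern (illustrative;
 * MY_BUF_MAX is a placeholder for the caller's buffer size):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, MY_BUF_MAX))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser)) {
 *		// parser.buffer now holds one space-separated token
 *	}
 *
 *	trace_parser_put(&parser);
 */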
1166 
1167 /*
1168  * Only create function graph options if function graph is configured.
1169  */
1170 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1171 # define FGRAPH_FLAGS						\
1172 		C(DISPLAY_GRAPH,	"display-graph"),
1173 #else
1174 # define FGRAPH_FLAGS
1175 #endif
1176 
1177 #ifdef CONFIG_BRANCH_TRACER
1178 # define BRANCH_FLAGS					\
1179 		C(BRANCH,		"branch"),
1180 #else
1181 # define BRANCH_FLAGS
1182 #endif
1183 
1184 #ifdef CONFIG_FUNCTION_TRACER
1185 # define FUNCTION_FLAGS						\
1186 		C(FUNCTION,		"function-trace"),	\
1187 		C(FUNC_FORK,		"function-fork"),
1188 # define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
1189 #else
1190 # define FUNCTION_FLAGS
1191 # define FUNCTION_DEFAULT_FLAGS		0UL
1192 # define TRACE_ITER_FUNC_FORK		0UL
1193 #endif
1194 
1195 #ifdef CONFIG_STACKTRACE
1196 # define STACK_FLAGS				\
1197 		C(STACKTRACE,		"stacktrace"),
1198 #else
1199 # define STACK_FLAGS
1200 #endif
1201 
1202 /*
1203  * trace_iterator_flags is an enumeration that defines bit
1204  * positions into trace_flags that control the output.
1205  *
1206  * NOTE: These bits must match the trace_options array in
1207  *       trace.c (this macro guarantees it).
1208  */
1209 #define TRACE_FLAGS						\
1210 		C(PRINT_PARENT,		"print-parent"),	\
1211 		C(SYM_OFFSET,		"sym-offset"),		\
1212 		C(SYM_ADDR,		"sym-addr"),		\
1213 		C(VERBOSE,		"verbose"),		\
1214 		C(RAW,			"raw"),			\
1215 		C(HEX,			"hex"),			\
1216 		C(BIN,			"bin"),			\
1217 		C(BLOCK,		"block"),		\
1218 		C(FIELDS,		"fields"),		\
1219 		C(PRINTK,		"trace_printk"),	\
1220 		C(ANNOTATE,		"annotate"),		\
1221 		C(USERSTACKTRACE,	"userstacktrace"),	\
1222 		C(SYM_USEROBJ,		"sym-userobj"),		\
1223 		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
1224 		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
1225 		C(LATENCY_FMT,		"latency-format"),	\
1226 		C(RECORD_CMD,		"record-cmd"),		\
1227 		C(RECORD_TGID,		"record-tgid"),		\
1228 		C(OVERWRITE,		"overwrite"),		\
1229 		C(STOP_ON_FREE,		"disable_on_free"),	\
1230 		C(IRQ_INFO,		"irq-info"),		\
1231 		C(MARKERS,		"markers"),		\
1232 		C(EVENT_FORK,		"event-fork"),		\
1233 		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
1234 		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
1235 		FUNCTION_FLAGS					\
1236 		FGRAPH_FLAGS					\
1237 		STACK_FLAGS					\
1238 		BRANCH_FLAGS
1239 
1240 /*
1241  * By defining C, we can make TRACE_FLAGS a list of bit names
1242  * that will define the bits for the flag masks.
1243  */
1244 #undef C
1245 #define C(a, b) TRACE_ITER_##a##_BIT
1246 
1247 enum trace_iterator_bits {
1248 	TRACE_FLAGS
1249 	/* Make sure we don't go more than we have bits for */
1250 	TRACE_ITER_LAST_BIT
1251 };
1252 
1253 /*
1254  * By redefining C, we can make TRACE_FLAGS a list of masks that
1255  * use the bits as defined above.
1256  */
1257 #undef C
1258 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1259 
1260 enum trace_iterator_flags { TRACE_FLAGS };
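/*
 * Illustrative expansion: C(RAW, "raw") first becomes
 * TRACE_ITER_RAW_BIT in trace_iterator_bits, and then
 * TRACE_ITER_RAW = (1 << TRACE_ITER_RAW_BIT) here, so tests read:
 *
 *	if (tr->trace_flags & TRACE_ITER_RAW)
 *		// the "raw" option is set
 */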
1261 
1262 /*
1263  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1264  * control the output of kernel symbols.
1265  */
1266 #define TRACE_ITER_SYM_MASK \
1267 	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1268 
1269 extern struct tracer nop_trace;
1270 
1271 #ifdef CONFIG_BRANCH_TRACER
1272 extern int enable_branch_tracing(struct trace_array *tr);
1273 extern void disable_branch_tracing(void);
1274 static inline int trace_branch_enable(struct trace_array *tr)
1275 {
1276 	if (tr->trace_flags & TRACE_ITER_BRANCH)
1277 		return enable_branch_tracing(tr);
1278 	return 0;
1279 }
1280 static inline void trace_branch_disable(void)
1281 {
1282 	/* due to races, always disable */
1283 	disable_branch_tracing();
1284 }
1285 #else
1286 static inline int trace_branch_enable(struct trace_array *tr)
1287 {
1288 	return 0;
1289 }
1290 static inline void trace_branch_disable(void)
1291 {
1292 }
1293 #endif /* CONFIG_BRANCH_TRACER */
1294 
1295 /* set ring buffers to default size if not already done */
1296 int tracing_update_buffers(void);
1297 
1298 union trace_synth_field {
1299 	u8				as_u8;
1300 	u16				as_u16;
1301 	u32				as_u32;
1302 	u64				as_u64;
1303 	struct trace_dynamic_info	as_dynamic;
1304 };
1305 
1306 struct ftrace_event_field {
1307 	struct list_head	link;
1308 	const char		*name;
1309 	const char		*type;
1310 	int			filter_type;
1311 	int			offset;
1312 	int			size;
1313 	int			is_signed;
1314 	int			len;
1315 };
1316 
1317 struct prog_entry;
1318 
1319 struct event_filter {
1320 	struct prog_entry __rcu	*prog;
1321 	char			*filter_string;
1322 };
1323 
1324 struct event_subsystem {
1325 	struct list_head	list;
1326 	const char		*name;
1327 	struct event_filter	*filter;
1328 	int			ref_count;
1329 };
1330 
1331 struct trace_subsystem_dir {
1332 	struct list_head		list;
1333 	struct event_subsystem		*subsystem;
1334 	struct trace_array		*tr;
1335 	struct dentry			*entry;
1336 	int				ref_count;
1337 	int				nr_events;
1338 };
1339 
1340 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1341 				     struct trace_buffer *buffer,
1342 				     struct ring_buffer_event *event);
1343 
1344 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1345 				     struct trace_buffer *buffer,
1346 				     struct ring_buffer_event *event,
1347 				     unsigned int trace_ctx,
1348 				     struct pt_regs *regs);
1349 
1350 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1351 					      struct trace_buffer *buffer,
1352 					      struct ring_buffer_event *event,
1353 					      unsigned int trace_ctx)
1354 {
1355 	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
1356 }
1357 
1358 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1359 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1360 void trace_buffered_event_disable(void);
1361 void trace_buffered_event_enable(void);
1362 
1363 void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
1364 
1365 static inline void
1366 __trace_event_discard_commit(struct trace_buffer *buffer,
1367 			     struct ring_buffer_event *event)
1368 {
1369 	if (this_cpu_read(trace_buffered_event) == event) {
1370 		/* Simply release the temp buffer and enable preemption */
1371 		this_cpu_dec(trace_buffered_event_cnt);
1372 		preempt_enable_notrace();
1373 		return;
1374 	}
1375 	/* ring_buffer_discard_commit() enables preemption */
1376 	ring_buffer_discard_commit(buffer, event);
1377 }
1378 
1379 /*
1380  * Helper function for event_trigger_unlock_commit{_regs}().
1381  * If there are event triggers attached to this event that require
1382  * filtering against its fields, then they will be called as the
1383  * entry already holds the field information of the current event.
1384  *
1385  * It also checks if the event should be discarded or not.
1386  * It is to be discarded if the event is soft disabled and the
1387  * event was only recorded to process triggers, or if the event
1388  * filter is active and this event did not match the filters.
1389  *
1390  * Returns true if the event is discarded, false otherwise.
1391  */
1392 static inline bool
1393 __event_trigger_test_discard(struct trace_event_file *file,
1394 			     struct trace_buffer *buffer,
1395 			     struct ring_buffer_event *event,
1396 			     void *entry,
1397 			     enum event_trigger_type *tt)
1398 {
1399 	unsigned long eflags = file->flags;
1400 
1401 	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1402 		*tt = event_triggers_call(file, buffer, entry, event);
1403 
1404 	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
1405 				    EVENT_FILE_FL_FILTERED |
1406 				    EVENT_FILE_FL_PID_FILTER))))
1407 		return false;
1408 
1409 	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
1410 		goto discard;
1411 
1412 	if (file->flags & EVENT_FILE_FL_FILTERED &&
1413 	    !filter_match_preds(file->filter, entry))
1414 		goto discard;
1415 
1416 	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
1417 	    trace_event_ignore_this_pid(file))
1418 		goto discard;
1419 
1420 	return false;
1421  discard:
1422 	__trace_event_discard_commit(buffer, event);
1423 	return true;
1424 }
1425 
1426 /**
1427  * event_trigger_unlock_commit - handle triggers and finish event commit
1428  * @file: The file pointer associated with the event
1429  * @buffer: The ring buffer that the event is being written to
1430  * @event: The event meta data in the ring buffer
1431  * @entry: The event itself
1432  * @trace_ctx: The tracing context flags.
1433  *
1434  * This is a helper function to handle triggers that require data
1435  * from the event itself. It also tests the event against filters and
1436  * if the event is soft disabled and should be discarded.
1437  */
1438 static inline void
1439 event_trigger_unlock_commit(struct trace_event_file *file,
1440 			    struct trace_buffer *buffer,
1441 			    struct ring_buffer_event *event,
1442 			    void *entry, unsigned int trace_ctx)
1443 {
1444 	enum event_trigger_type tt = ETT_NONE;
1445 
1446 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1447 		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1448 
1449 	if (tt)
1450 		event_triggers_post_call(file, tt);
1451 }
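/*
 * Call-site sketch (illustrative): a probe that has reserved an
 * event and filled in @entry finishes with:
 *
 *	entry->my_field = value;	// hypothetical event field
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    trace_ctx);
 */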
1452 
1453 #define FILTER_PRED_INVALID	((unsigned short)-1)
1454 #define FILTER_PRED_IS_RIGHT	(1 << 15)
1455 #define FILTER_PRED_FOLD	(1 << 15)
1456 
1457 /*
1458  * The max number of preds is limited by the size of an unsigned short,
1459  * with two flags at the MSBs. One bit is used for both the IS_RIGHT
1460  * and FOLD flags. The other is reserved.
1461  *
1462  * 2^14 preds is way more than enough.
1463  */
1464 #define MAX_FILTER_PRED		16384
1465 
1466 struct filter_pred;
1467 struct regex;
1468 
1469 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1470 
1471 enum regex_type {
1472 	MATCH_FULL = 0,
1473 	MATCH_FRONT_ONLY,
1474 	MATCH_MIDDLE_ONLY,
1475 	MATCH_END_ONLY,
1476 	MATCH_GLOB,
1477 	MATCH_INDEX,
1478 };
1479 
1480 struct regex {
1481 	char			pattern[MAX_FILTER_STR_VAL];
1482 	int			len;
1483 	int			field_len;
1484 	regex_match_func	match;
1485 };
1486 
1487 static inline bool is_string_field(struct ftrace_event_field *field)
1488 {
1489 	return field->filter_type == FILTER_DYN_STRING ||
1490 	       field->filter_type == FILTER_RDYN_STRING ||
1491 	       field->filter_type == FILTER_STATIC_STRING ||
1492 	       field->filter_type == FILTER_PTR_STRING ||
1493 	       field->filter_type == FILTER_COMM;
1494 }
1495 
1496 static inline bool is_function_field(struct ftrace_event_field *field)
1497 {
1498 	return field->filter_type == FILTER_TRACE_FN;
1499 }
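/*
 * Sketch (illustrative): filter_parse_regex(), declared below,
 * classifies a glob and strips the '*' markers; e.g. "*foo*" yields
 * MATCH_MIDDLE_ONLY with *search pointing at "foo", and a leading
 * '!' sets *not.
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(buff, strlen(buff), &search, &not);
 */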
1500 
1501 extern enum regex_type
1502 filter_parse_regex(char *buff, int len, char **search, int *not);
1503 extern void print_event_filter(struct trace_event_file *file,
1504 			       struct trace_seq *s);
1505 extern int apply_event_filter(struct trace_event_file *file,
1506 			      char *filter_string);
1507 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1508 					char *filter_string);
1509 extern void print_subsystem_event_filter(struct event_subsystem *system,
1510 					 struct trace_seq *s);
1511 extern int filter_assign_type(const char *type);
1512 extern int create_event_filter(struct trace_array *tr,
1513 			       struct trace_event_call *call,
1514 			       char *filter_str, bool set_str,
1515 			       struct event_filter **filterp);
1516 extern void free_event_filter(struct event_filter *filter);
1517 
1518 struct ftrace_event_field *
1519 trace_find_event_field(struct trace_event_call *call, char *name);
1520 
1521 extern void trace_event_enable_cmd_record(bool enable);
1522 extern void trace_event_enable_tgid_record(bool enable);
1523 
1524 extern int event_trace_init(void);
1525 extern int init_events(void);
1526 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1527 extern int event_trace_del_tracer(struct trace_array *tr);
1528 extern void __trace_early_add_events(struct trace_array *tr);
1529 
1530 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1531 						  const char *system,
1532 						  const char *event);
1533 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1534 						const char *system,
1535 						const char *event);
1536 
1537 static inline void *event_file_data(struct file *filp)
1538 {
1539 	return READ_ONCE(file_inode(filp)->i_private);
1540 }
1541 
1542 extern struct mutex event_mutex;
1543 extern struct list_head ftrace_events;
1544 
1545 extern const struct file_operations event_trigger_fops;
1546 extern const struct file_operations event_hist_fops;
1547 extern const struct file_operations event_hist_debug_fops;
1548 extern const struct file_operations event_inject_fops;
1549 
1550 #ifdef CONFIG_HIST_TRIGGERS
1551 extern int register_trigger_hist_cmd(void);
1552 extern int register_trigger_hist_enable_disable_cmds(void);
1553 #else
1554 static inline int register_trigger_hist_cmd(void) { return 0; }
1555 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1556 #endif
1557 
1558 extern int register_trigger_cmds(void);
1559 extern void clear_event_triggers(struct trace_array *tr);
1560 
1561 enum {
1562 	EVENT_TRIGGER_FL_PROBE		= BIT(0),
1563 };
1564 
1565 struct event_trigger_data {
1566 	unsigned long			count;
1567 	int				ref;
1568 	int				flags;
1569 	struct event_trigger_ops	*ops;
1570 	struct event_command		*cmd_ops;
1571 	struct event_filter __rcu	*filter;
1572 	char				*filter_str;
1573 	void				*private_data;
1574 	bool				paused;
1575 	bool				paused_tmp;
1576 	struct list_head		list;
1577 	char				*name;
1578 	struct list_head		named_list;
1579 	struct event_trigger_data	*named_data;
1580 };
1581 
1582 /* Avoid typos */
1583 #define ENABLE_EVENT_STR	"enable_event"
1584 #define DISABLE_EVENT_STR	"disable_event"
1585 #define ENABLE_HIST_STR		"enable_hist"
1586 #define DISABLE_HIST_STR	"disable_hist"
1587 
1588 struct enable_trigger_data {
1589 	struct trace_event_file		*file;
1590 	bool				enable;
1591 	bool				hist;
1592 };
1593 
1594 extern int event_enable_trigger_print(struct seq_file *m,
1595 				      struct event_trigger_data *data);
1596 extern void event_enable_trigger_free(struct event_trigger_data *data);
1597 extern int event_enable_trigger_parse(struct event_command *cmd_ops,
1598 				      struct trace_event_file *file,
1599 				      char *glob, char *cmd,
1600 				      char *param_and_filter);
1601 extern int event_enable_register_trigger(char *glob,
1602 					 struct event_trigger_data *data,
1603 					 struct trace_event_file *file);
1604 extern void event_enable_unregister_trigger(char *glob,
1605 					    struct event_trigger_data *test,
1606 					    struct trace_event_file *file);
1607 extern void trigger_data_free(struct event_trigger_data *data);
1608 extern int event_trigger_init(struct event_trigger_data *data);
1609 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1610 					      int trigger_enable);
1611 extern void update_cond_flag(struct trace_event_file *file);
1612 extern int set_trigger_filter(char *filter_str,
1613 			      struct event_trigger_data *trigger_data,
1614 			      struct trace_event_file *file);
1615 extern struct event_trigger_data *find_named_trigger(const char *name);
1616 extern bool is_named_trigger(struct event_trigger_data *test);
1617 extern int save_named_trigger(const char *name,
1618 			      struct event_trigger_data *data);
1619 extern void del_named_trigger(struct event_trigger_data *data);
1620 extern void pause_named_trigger(struct event_trigger_data *data);
1621 extern void unpause_named_trigger(struct event_trigger_data *data);
1622 extern void set_named_trigger_data(struct event_trigger_data *data,
1623 				   struct event_trigger_data *named_data);
1624 extern struct event_trigger_data *
1625 get_named_trigger_data(struct event_trigger_data *data);
1626 extern int register_event_command(struct event_command *cmd);
1627 extern int unregister_event_command(struct event_command *cmd);
1629 extern bool event_trigger_check_remove(const char *glob);
1630 extern bool event_trigger_empty_param(const char *param);
1631 extern int event_trigger_separate_filter(char *param_and_filter, char **param,
1632 					 char **filter, bool param_required);
1633 extern struct event_trigger_data *
1634 event_trigger_alloc(struct event_command *cmd_ops,
1635 		    char *cmd,
1636 		    char *param,
1637 		    void *private_data);
1638 extern int event_trigger_parse_num(char *trigger,
1639 				   struct event_trigger_data *trigger_data);
1640 extern int event_trigger_set_filter(struct event_command *cmd_ops,
1641 				    struct trace_event_file *file,
1642 				    char *param,
1643 				    struct event_trigger_data *trigger_data);
1644 extern void event_trigger_reset_filter(struct event_command *cmd_ops,
1645 				       struct event_trigger_data *trigger_data);
1646 extern int event_trigger_register(struct event_command *cmd_ops,
1647 				  struct trace_event_file *file,
1648 				  char *glob,
1649 				  struct event_trigger_data *trigger_data);
1650 extern void event_trigger_unregister(struct event_command *cmd_ops,
1651 				     struct trace_event_file *file,
1652 				     char *glob,
1653 				     struct event_trigger_data *trigger_data);
1654 
1655 /**
1656  * struct event_trigger_ops - callbacks for trace event triggers
1657  *
1658  * The methods in this structure provide per-event trigger hooks for
1659  * various trigger operations.
1660  *
1661  * The @init and @free methods are used during trigger setup and
1662  * teardown, typically called from an event_command's @parse()
1663  * function implementation.
1664  *
1665  * The @print method is used to print the trigger spec.
1666  *
1667  * The @trigger method is the function that actually implements the
1668  * trigger and is called in the context of the triggering event
1669  * whenever that event occurs.
1670  *
1671  * All the methods below, except for @init() and @free(), must be
1672  * implemented.
1673  *
1674  * @trigger: The trigger 'probe' function called when the triggering
1675  *	event occurs.  The data passed into this callback is the data
1676  *	that was supplied to the event_command @reg() function that
1677  *	registered the trigger (see struct event_command) along with
1678  *	the trace record, rec.
1679  *
1680  * @init: An optional initialization function called for the trigger
1681  *	when the trigger is registered (via the event_command reg()
1682  *	function).  This can be used to perform per-trigger
1683  *	initialization such as incrementing a per-trigger reference
1684  *	count, for instance.  This is usually implemented by the
1685  *	generic utility function @event_trigger_init() (see
1686  *	trace_events_trigger.c).
1687  *
1688  * @free: An optional de-initialization function called for the
1689  *	trigger when the trigger is unregistered (via the
1690  *	event_command @reg() function).  This can be used to perform
1691  *	event_command @unreg() function).  This can be used to perform
1692  *	per-trigger reference count and freeing corresponding trigger
1693  *	data, for instance.  This is usually implemented by the
1694  *	generic utility function @event_trigger_free() (see
1695  *	trace_events_trigger.c).
1696  *
1697  * @print: The callback function invoked to have the trigger print
1698  *	itself.  This is usually implemented by a wrapper function
1699  *	that calls the generic utility function @event_trigger_print()
1700  *	(see trace_events_trigger.c).
1701  */
1702 struct event_trigger_ops {
1703 	void			(*trigger)(struct event_trigger_data *data,
1704 					   struct trace_buffer *buffer,
1705 					   void *rec,
1706 					   struct ring_buffer_event *rbe);
1707 	int			(*init)(struct event_trigger_data *data);
1708 	void			(*free)(struct event_trigger_data *data);
1709 	int			(*print)(struct seq_file *m,
1710 					 struct event_trigger_data *data);
1711 };
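/*
 * A minimal sketch of an event_trigger_ops instance (illustrative
 * only: my_trigger and my_trigger_print are made-up names; the
 * countdown mirrors the pattern used by the generic triggers, and
 * event_trigger_init()/event_trigger_free() are the generic helpers
 * referenced above):
 *
 *	static void my_trigger(struct event_trigger_data *data,
 *			       struct trace_buffer *buffer, void *rec,
 *			       struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *
 *		...perform the trigger action here...
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.trigger	= my_trigger,
 *		.init		= event_trigger_init,
 *		.free		= event_trigger_free,
 *		.print		= my_trigger_print,
 *	};
 */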
1712 
1713 /**
1714  * struct event_command - callbacks and data members for event commands
1715  *
1716  * Event commands are invoked by users by writing the command name
1717  * into the 'trigger' file associated with a trace event.  The
1718  * parameters associated with a specific invocation of an event
1719  * command are used to create an event trigger instance, which is
1720  * added to the list of trigger instances associated with that trace
1721  * event.  When the event is hit, the set of triggers associated with
1722  * that event is invoked.
1723  *
1724  * The data members in this structure provide per-event command data
1725  * for various event commands.
1726  *
1727  * All the data members below, except for @flags, must be set
1728  * for each event command.
1729  *
1730  * @name: The unique name that identifies the event command.  This is
1731  *	the name used when setting triggers via trigger files.
1732  *
1733  * @trigger_type: A unique id that identifies the event command
1734  *	'type'.  This value has two purposes, the first to ensure that
1735  *	only one trigger of the same type can be set at a given time
1736  *	for a particular event; e.g. it doesn't make sense to have both
1737  *	a traceon and traceoff trigger attached to a single event at
1738  *	the same time, so traceon and traceoff have the same type
1739  *	though they have different names.  The @trigger_type value is
1740  *	also used as a bit value for deferring the actual trigger
1741  *	action until after the current event is finished.  Some
1742  *	commands need to do this if they themselves log to the trace
1743  *	buffer (see the @post_trigger() member below).  @trigger_type
1744  *	values are defined by adding new values to the event_trigger_type
1745  *	enum in include/linux/trace_events.h.
1746  *
1747  * @flags: See the enum event_command_flags below.
1748  *
1749  * All the methods below, except for @set_filter() and @unreg_all(),
1750  * must be implemented.
1751  *
1752  * @parse: The callback function responsible for parsing and
1753  *	registering the trigger written to the 'trigger' file by the
1754  *	user.  It allocates the trigger instance and registers it with
1755  *	the appropriate trace event.  It makes use of the other
1756  *	event_command callback functions to orchestrate this, and is
1757  *	usually implemented by the generic utility function
1758  *	@event_trigger_parse() (see trace_events_trigger.c).
1759  *
1760  * @reg: Adds the trigger to the list of triggers associated with the
1761  *	event, and enables the event trigger itself, after
1762  *	initializing it (via the event_trigger_ops @init() function).
1763  *	This is also where commands can use the @trigger_type value to
1764  *	make the decision as to whether or not multiple instances of
1765  *	the trigger should be allowed.  This is usually implemented by
1766  *	the generic utility function @register_trigger() (see
1767  *	trace_events_trigger.c).
1768  *
1769  * @unreg: Removes the trigger from the list of triggers associated
1770  *	with the event, and disables the event trigger itself, then
1771  *	frees it (via the event_trigger_ops @free() function).
1772  *	This is usually implemented by the generic utility function
1773  *	@unregister_trigger() (see trace_events_trigger.c).
1774  *
1775  * @unreg_all: An optional function called to remove all the triggers
1776  *	from the list of triggers associated with the event.  Called
1777  *	when a trigger file is opened in truncate mode.
1778  *
1779  * @set_filter: An optional function called to parse and set a filter
1780  *	for the trigger.  If no @set_filter() method is set for the
1781  *	event command, filters set by the user for the command will be
1782  *	ignored.  This is usually implemented by the generic utility
1783  *	function @set_trigger_filter() (see trace_events_trigger.c).
1784  *
1785  * @get_trigger_ops: The callback function invoked to retrieve the
1786  *	event_trigger_ops implementation associated with the command.
1787  *	This callback function allows a single event_command to
1788  *	support multiple trigger implementations via different sets of
1789  *	event_trigger_ops, depending on the value of the @param
1790  *	string.
1791  */
1792 struct event_command {
1793 	struct list_head	list;
1794 	char			*name;
1795 	enum event_trigger_type	trigger_type;
1796 	int			flags;
1797 	int			(*parse)(struct event_command *cmd_ops,
1798 					 struct trace_event_file *file,
1799 					 char *glob, char *cmd,
1800 					 char *param_and_filter);
1801 	int			(*reg)(char *glob,
1802 				       struct event_trigger_data *data,
1803 				       struct trace_event_file *file);
1804 	void			(*unreg)(char *glob,
1805 					 struct event_trigger_data *data,
1806 					 struct trace_event_file *file);
1807 	void			(*unreg_all)(struct trace_event_file *file);
1808 	int			(*set_filter)(char *filter_str,
1809 					      struct event_trigger_data *data,
1810 					      struct trace_event_file *file);
1811 	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1812 };
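/*
 * A minimal sketch of an event_command definition (illustrative only:
 * "mycmd", ETT_MY and my_get_trigger_ops are made-up names, and the
 * generic event_trigger_parse()/register_trigger()/unregister_trigger()
 * helpers referenced here live in trace_events_trigger.c):
 *
 *	static struct event_command my_cmd = {
 *		.name			= "mycmd",
 *		.trigger_type		= ETT_MY,
 *		.parse			= event_trigger_parse,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	static __init int my_cmd_init(void)
 *	{
 *		return register_event_command(&my_cmd);
 *	}
 */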
1813 
1814 /**
1815  * enum event_command_flags - flags for struct event_command
1816  *
1817  * @POST_TRIGGER: A flag that says whether or not this command needs
1818  *	to have its action delayed until after the current event has
1819  *	been closed.  Some triggers need to avoid being invoked while
1820  *	an event is currently in the process of being logged, since
1821  *	the trigger may itself log data into the trace buffer.  Thus
1822  *	we make sure the current event is committed before invoking
1823  *	those triggers.  To do that, the trigger invocation is split
1824  *	in two - the first part checks the filter using the current
1825  *	trace record; if a command has the @post_trigger flag set, it
1826  *	sets a bit for itself in the return value, otherwise it
1827  *	directly invokes the trigger.  Once all commands have been
1828  *	either invoked or set their return flag, the current record is
1829  *	either committed or discarded.  At that point, if any commands
1830  *	have deferred their triggers, those commands are finally
1831  *	invoked following the close of the current event.  In other
1832  *	words, if the event_trigger_ops @trigger() probe implementation
1833  *	itself logs to the trace buffer, this flag should be set,
1834  *	otherwise it can be left unspecified.
1835  *
1836  * @NEEDS_REC: A flag that says whether or not this command needs
1837  *	access to the trace record in order to perform its function,
1838  *	regardless of whether or not it has a filter associated with
1839  *	it (filters make a trigger require access to the trace record
1840  *	but are not always present).
1841  */
1842 enum event_command_flags {
1843 	EVENT_CMD_FL_POST_TRIGGER	= 1,
1844 	EVENT_CMD_FL_NEEDS_REC		= 2,
1845 };
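/*
 * Sketch of the resulting two-phase invocation on the event commit
 * path (simplified; event_triggers_call() and
 * event_triggers_post_call() are declared in
 * include/linux/trace_events.h):
 *
 *	tt = event_triggers_call(file, buffer, entry, event);
 *	...commit or discard the current event...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */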
1846 
1847 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1848 {
1849 	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1850 }
1851 
1852 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1853 {
1854 	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1855 }
1856 
1857 extern int trace_event_enable_disable(struct trace_event_file *file,
1858 				      int enable, int soft_disable);
1859 extern int tracing_alloc_snapshot(void);
1860 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1861 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1862 
1863 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1864 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1865 
1866 extern const char *__start___trace_bprintk_fmt[];
1867 extern const char *__stop___trace_bprintk_fmt[];
1868 
1869 extern const char *__start___tracepoint_str[];
1870 extern const char *__stop___tracepoint_str[];
1871 
1872 void trace_printk_control(bool enabled);
1873 void trace_printk_start_comm(void);
1874 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1875 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1876 
1877 /* Used from boot time tracer */
1878 extern int trace_set_options(struct trace_array *tr, char *option);
1879 extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1880 extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1881 					  unsigned long size, int cpu_id);
1882 extern int tracing_set_cpumask(struct trace_array *tr,
1883 				cpumask_var_t tracing_cpumask_new);
1884 
1885 
1886 #define MAX_EVENT_NAME_LEN	64
1887 
1888 extern ssize_t trace_parse_run_command(struct file *file,
1889 		const char __user *buffer, size_t count, loff_t *ppos,
1890 		int (*createfn)(const char *));
1891 
1892 extern unsigned int err_pos(char *cmd, const char *str);
1893 extern void tracing_log_err(struct trace_array *tr,
1894 			    const char *loc, const char *cmd,
1895 			    const char **errs, u8 type, u16 pos);
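/*
 * Illustrative use (MY_ERR_BAD_PARAM and my_errs are made-up names):
 * report a parse error against the failing command string, using
 * err_pos() to compute the caret position of the bad token:
 *
 *	tracing_log_err(tr, "trigger", cmd, my_errs,
 *			MY_ERR_BAD_PARAM, err_pos(cmd, param));
 */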
1896 
1897 /*
1898  * Normal trace_printk() and friends allocate special buffers
1899  * to do the manipulation, as well as save the print formats
1900  * into sections to display. But the trace infrastructure wants
1901  * to use these without the added overhead, at the price of being
1902  * a bit slower (used mainly for warnings, where we don't care
1903  * about performance). internal_trace_puts() exists for such
1904  * a purpose.
1905  */
1906 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
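/*
 * e.g. (illustrative):
 *
 *	internal_trace_puts("*** trace buffer overrun ***\n");
 */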
1907 
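/*
 * Re-include trace_entries.h with FTRACE_ENTRY() and friends redefined
 * so that each entry now expands to an extern declaration of its
 * struct trace_event_call (the first inclusion of trace_entries.h
 * generated the entry structures themselves).
 */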
1908 #undef FTRACE_ENTRY
1909 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
1910 	extern struct trace_event_call					\
1911 	__aligned(4) event_##call;
1912 #undef FTRACE_ENTRY_DUP
1913 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
1914 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1915 #undef FTRACE_ENTRY_PACKED
1916 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1917 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1918 
1919 #include "trace_entries.h"
1920 
1921 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1922 int perf_ftrace_event_register(struct trace_event_call *call,
1923 			       enum trace_reg type, void *data);
1924 #else
1925 #define perf_ftrace_event_register NULL
1926 #endif
1927 
1928 #ifdef CONFIG_FTRACE_SYSCALLS
1929 void init_ftrace_syscalls(void);
1930 const char *get_syscall_name(int syscall);
1931 #else
1932 static inline void init_ftrace_syscalls(void) { }
1933 static inline const char *get_syscall_name(int syscall)
1934 {
1935 	return NULL;
1936 }
1937 #endif
1938 
1939 #ifdef CONFIG_EVENT_TRACING
1940 void trace_event_init(void);
1941 void trace_event_eval_update(struct trace_eval_map **map, int len);
1942 /* Used from boot time tracer */
1943 extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
1944 extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1945 #else
1946 static inline void __init trace_event_init(void) { }
1947 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1948 #endif
1949 
1950 #ifdef CONFIG_TRACER_SNAPSHOT
1951 void tracing_snapshot_instance(struct trace_array *tr);
1952 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1953 #else
1954 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1955 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1956 {
1957 	return 0;
1958 }
1959 #endif
1960 
1961 #ifdef CONFIG_PREEMPT_TRACER
1962 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1963 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1964 #else
1965 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1966 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1967 #endif
1968 #ifdef CONFIG_IRQSOFF_TRACER
1969 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1970 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1971 #else
1972 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1973 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1974 #endif
1975 
1976 /*
1977  * Reset the state of the trace_iterator so that it can read consumed data.
1978  * Normally, the trace_iterator is used for reading the data when it is not
1979  * consumed, and must retain state.
1980  */
1981 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1982 {
1983 	memset_startat(iter, 0, seq);
1984 	iter->pos = -1;
1985 }
1986 
1987 /* Check that the name is valid for an event/group/field */
1988 static inline bool __is_good_name(const char *name, bool hash_ok)
1989 {
1990 	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
1991 		return false;
1992 	while (*++name != '\0') {
1993 		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
1994 		    (!hash_ok || *name != '-'))
1995 			return false;
1996 	}
1997 	return true;
1998 }
1999 
2000 /* Check that the name is valid for an event/group/field */
2001 static inline bool is_good_name(const char *name)
2002 {
2003 	return __is_good_name(name, false);
2004 }
2005 
2006 /* Check that the name is valid for a system */
2007 static inline bool is_good_system_name(const char *name)
2008 {
2009 	return __is_good_name(name, true);
2010 }
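/*
 * Examples (illustrative):
 *
 *	is_good_name("my_event1")	-> true
 *	is_good_name("1st_event")	-> false (cannot start with a digit)
 *	is_good_name("my-event")	-> false ('-' is only accepted in
 *						  system names)
 *	is_good_system_name("my-sys")	-> true
 */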
2011 
2012 /* Convert certain expected symbols into '_' when generating event names */
2013 static inline void sanitize_event_name(char *name)
2014 {
2015 	while (*name++ != '\0')
2016 		if (*name == ':' || *name == '.')
2017 			*name = '_';
2018 }
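/*
 * e.g. "port.0:tx" becomes "port_0_tx".  Note that the loop increments
 * past the leading character before testing, so the first character is
 * never rewritten (callers are expected to validate it separately,
 * e.g. with is_good_name()).
 */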
2019 
2020 /*
2021  * This is a generic way to read and write a u64 value from a file in tracefs.
2022  *
2023  * The value is stored in the variable pointed to by *val. The value needs
2024  * to be at least *min and at most *max. The write is protected by an
2025  * existing *lock.
2026  */
2027 struct trace_min_max_param {
2028 	struct mutex	*lock;
2029 	u64		*val;
2030 	u64		*min;
2031 	u64		*max;
2032 };
2033 
2034 #define U64_STR_SIZE		24	/* 20 digits max */
2035 
2036 extern const struct file_operations trace_min_max_fops;
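/*
 * A minimal sketch of wiring a u64 up to tracefs with these (the
 * my_* names and the 'parent' dentry are assumptions of this example):
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_val", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */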
2037 
2038 #ifdef CONFIG_RV
2039 extern int rv_init_interface(void);
2040 #else
2041 static inline int rv_init_interface(void)
2042 {
2043 	return 0;
2044 }
2045 #endif
2046 
2047 #endif /* _LINUX_KERNEL_TRACE_H */
2048