
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
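
/*
 * With the macros above, each FTRACE_ENTRY() in trace_entries.h expands
 * into a plain struct definition. As a rough illustration, the function
 * entry in trace_entries.h looks roughly like:
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		), ...);
 *
 * which expands here to:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */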

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- a preemption reschedule is requested
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};
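
/*
 * These flags are OR'd together into the flags byte of each trace_entry.
 * A minimal sketch of how a record's flags are derived (the real logic
 * lives in tracing_generic_entry_update() in trace.c):
 *
 *	entry->flags =
 *		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 *		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 *		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 *		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 *		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 */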

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing
	 * can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in places outside of update_max_tr(), so it
	 * needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)
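
/*
 * Typical use, sketched from the pattern in trace_output.c: declare a
 * pointer of the specific entry type and let trace_assign_type() verify
 * and perform the cast:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	... use field->ip, field->parent_ip ...
 */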

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value in the
 * flags value of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
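
/*
 * A minimal sketch of how a tracer defines its private options; the
 * function graph tracer in trace_functions_graph.c follows this pattern.
 * The opts array must be terminated with an empty entry:
 *
 *	static struct tracer_opt trace_opts[] = {
 *		{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags tracer_flags = {
 *		.val = TRACE_GRAPH_PRINT_CPU,
 *		.opts = trace_opts
 *	};
 */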


/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};

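/*
 * A rough sketch of a minimal tracer, modeled on the nop tracer in
 * trace_nop.c; callbacks left NULL simply keep the default behavior,
 * and the tracer becomes selectable once it is handed to
 * register_tracer():
 *
 *	static struct tracer nop_trace __read_mostly = {
 *		.name		= "nop",
 *		.init		= nop_trace_init,
 *		.reset		= nop_trace_reset,
 *		.allow_instances = true,
 *	};
 *
 *	register_tracer(&nop_trace);
 */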

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we may want to trace a particular
 * function that was called from irq context even though we have irq
 * tracing off. Since this can only be modified by current, we can
 * reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
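
/*
 * Typical usage of the recursion protection, sketched from the pattern
 * in function_trace_call() (trace_functions.c): reserve the bit for the
 * current context on entry, bail out if it is already taken, and clear
 * it again on exit:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *	...
 *	trace_clear_recursion(bit);
 */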

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
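
/*
 * Example of iterating the CPUs that tracing covers, a sketch of the
 * pattern used in ftrace_dump() in trace.c:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
 */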

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40
#define TRACE_GRAPH_PRINT_TAIL          0x80
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
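
/*
 * Callers OR these flags together and hand them to the graph output
 * helpers. A sketch of the pattern used by the irqsoff tracer
 * (trace_irqsoff.c):
 *
 *	#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
 *				    TRACE_GRAPH_PRINT_PROC | \
 *				    TRACE_GRAPH_PRINT_DURATION)
 *
 *	return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 */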

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
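
/*
 * Typical parser usage, sketched from the pattern in ftrace_regex_write()
 * (ftrace.c): read one space-separated token from user space, then act on
 * it once a complete token has been loaded. process_token() here stands
 * in for whatever consumes the token:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		ret = process_token(parser.buffer, parser.idx);
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */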

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE       = 0x2000,
	TRACE_ITER_SYM_USEROBJ          = 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
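
/*
 * Output code tests these bits against the global trace_flags. A sketch
 * of the pattern used in trace_output.c's seq_print_ip_sym():
 *
 *	if (sym_flags & TRACE_ITER_SYM_OFFSET)
 *		ret = seq_print_sym_offset(s, "%s", ip);
 *	else
 *		ret = seq_print_sym_short(s, "%s", ip);
 */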

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct ftrace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
						 const char *system,
						 const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	struct list_head		list;
};

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names.  The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished.  Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below).  @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/ftrace_event.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded.  At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event.  In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct ftrace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct ftrace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct ftrace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
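
/*
 * A rough sketch of how a command is defined, modeled on the traceon
 * trigger in trace_events_trigger.c (the generic helpers named here,
 * event_trigger_callback(), register_trigger(), unregister_trigger()
 * and set_trigger_filter(), live in that file):
 *
 *	static struct event_command trigger_traceon_cmd = {
 *		.name			= "traceon",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= onoff_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 * The command is then made available by adding it to the list of
 * trigger commands at boot (see register_trigger_cmds() above).
 */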

extern int trace_event_enable_disable(struct ftrace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */