/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that does,
 * or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif


#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags that can be set only before registering
 * the ftrace_ops and cannot be modified while it is registered.
 * Changing these attribute flags after the ftrace_ops has been registered
 * will cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocates a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs to
 * make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must likewise not be freed while the ftrace_ops
 * may still be in use, and if private data is added to a ftrace_ops that
 * is in core code, the user of the ftrace_ops must perform a
 * schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);
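
/*
 * Illustrative sketch only (my_callback and my_ops are made-up names, not
 * part of this header): a minimal function-trace callback registered
 * through the API above.  The ops is static and read_mostly as required
 * by the comment preceding register_ftrace_function().
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// runs on every traced function call; must be re-entrant
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */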

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
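
/*
 * Illustrative sketch only (my_tramp and my_func are hypothetical, and the
 * trampoline must be hand-written arch assembly that preserves the traced
 * function's registers): attaching a direct call with the API above.  On
 * architectures where the fentry call is the first instruction of a
 * function, the function address is also the call-site ip; otherwise
 * resolve the call site with ftrace_location() first.
 *
 *	extern void my_tramp(void);	// arch asm trampoline
 *
 *	ret = register_ftrace_direct((unsigned long)my_func,
 *				     (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)my_func,
 *				 (unsigned long)my_tramp);
 */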

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp,
		       loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly afterward,
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
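
/*
 * Usage sketch for the pair above (the surrounding preempt_disable_notrace()
 * and the protected code are illustrative, not a requirement of any specific
 * caller): both calls must run with preemption or interrupts disabled and
 * must tightly bracket the critical section.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// ... code that must not recurse into the stack tracer ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */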

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the REGS bit is set in rec->flags. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
415 enum {
416 	FTRACE_FL_ENABLED	= (1UL << 31),
417 	FTRACE_FL_REGS		= (1UL << 30),
418 	FTRACE_FL_REGS_EN	= (1UL << 29),
419 	FTRACE_FL_TRAMP		= (1UL << 28),
420 	FTRACE_FL_TRAMP_EN	= (1UL << 27),
421 	FTRACE_FL_IPMODIFY	= (1UL << 26),
422 	FTRACE_FL_DISABLED	= (1UL << 25),
423 	FTRACE_FL_DIRECT	= (1UL << 24),
424 	FTRACE_FL_DIRECT_EN	= (1UL << 23),
425 };
426 
427 #define FTRACE_REF_MAX_SHIFT	23
428 #define FTRACE_FL_BITS		9
429 #define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
430 #define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
431 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
432 
433 #define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
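
/*
 * Worked example of the split defined above: with FTRACE_REF_MAX_SHIFT == 23
 * and FTRACE_FL_BITS == 9, the top 9 bits of dyn_ftrace.flags hold the
 * FTRACE_FL_* bits and the low 23 bits hold the reference count.  A record
 * whose flags equal (FTRACE_FL_ENABLED | FTRACE_FL_REGS_EN | 2) is being
 * traced, is set up to save regs, has two registered callbacks, and
 * ftrace_rec_count(rec) evaluates to 2.
 */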

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
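
/*
 * Illustrative sketch only (my_ops and the chosen symbols are hypothetical):
 * filters are normally installed on an ftrace_ops before it is registered,
 * and freed only after it has been unregistered.
 *
 *	ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 1);
 *	// or, by address:
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)kfree, 0, 0);
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */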

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change the call site to use a different trampoline
 *                     (e.g. to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
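
/*
 * Example walk with the iterator above, as an arch's ftrace_replace_code()
 * implementation might do (sketch only; the loop body is hypothetical):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch the call site at rec->ip
 *	}
 */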


int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * as the address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
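
/*
 * Sketch of the read/compare/write pattern the kerneldoc above describes.
 * The helpers here (ftrace_expected_call(), ftrace_arch_nop(),
 * ftrace_read_insn(), ftrace_poke_insn()) are hypothetical stand-ins for
 * whatever the arch actually provides, and MCOUNT_INSN_SIZE stands for the
 * arch's patch-site size; only the error codes follow the contract above.
 *
 *	unsigned char old[MCOUNT_INSN_SIZE], new[MCOUNT_INSN_SIZE];
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	ftrace_expected_call(old, rec->ip, addr);	// what should be there
 *	ftrace_arch_nop(new);				// what to put there
 *	if (ftrace_read_insn(cur, rec->ip))
 *		return -EFAULT;
 *	if (memcmp(cur, old, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (ftrace_poke_insn(rec->ip, new, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */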


/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address the call site is currently expected to call
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have one defined when ftrace is not enabled, but these
 * functions may still be called. Use macros instead of inlines.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
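
/*
 * Typical pairing of the two helpers above (sketch only; the caller must
 * provide the synchronization mentioned in the comment before them):
 *
 *	int saved = __ftrace_enabled_save();
 *	// ... code that must run with the function tracer disabled ...
 *	__ftrace_enabled_restore(saved);
 */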

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
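
/*
 * Illustrative sketch only (the callback and ops names are hypothetical):
 * a function graph client provides one entry and one return handler via
 * fgraph_ops and registers them with register_ftrace_graph().
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// non-zero: also trace this function's return
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time spent
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */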

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */