/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

struct ftrace_ops;
struct ftrace_regs;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
 * if it allows setting the instruction pointer from ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
 */
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}
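
/*
 * Example (illustrative sketch, not part of the kernel API surface): a
 * callback registered with FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED must
 * tolerate a NULL pt_regs, since the arch may not support saving regs.
 * The callback name is hypothetical.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (!regs)
 *			return;	// arch did not save full regs
 *		// inspect regs here
 *	}
 */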

/*
 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
 */
static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
{
	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
		return true;

	return ftrace_get_regs(fregs) != NULL;
}

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_regs_get_instruction_pointer(fregs) \
	instruction_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_get_argument(fregs, n) \
	regs_get_kernel_argument(ftrace_get_regs(fregs), n)
#define ftrace_regs_get_stack_pointer(fregs) \
	kernel_stack_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_return_value(fregs) \
	regs_return_value(ftrace_get_regs(fregs))
#define ftrace_regs_set_return_value(fregs, ret) \
	regs_set_return_value(ftrace_get_regs(fregs), ret)
#define ftrace_override_function_with_return(fregs) \
	override_function_with_return(ftrace_get_regs(fregs))
#define ftrace_regs_query_register_offset(name) \
	regs_query_register_offset(name)
#endif
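
/*
 * Example (sketch only): reading call-site state through the accessors
 * above from inside a callback. Guard with ftrace_regs_has_args() since
 * a pt_regs-less fregs may not carry argument state. Names are made up.
 *
 *	if (ftrace_regs_has_args(fregs)) {
 *		unsigned long ip = ftrace_regs_get_instruction_pointer(fregs);
 *		unsigned long a0 = ftrace_regs_get_argument(fregs, 0);
 *		// ...
 *	}
 */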

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are attribute flags that can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

/*
 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
 * to a ftrace_ops. Note, the requests may fail.
 *
 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the DIRECT ops is being registered.
 *                              This is called with both direct_mutex and
 *                              ftrace_lock locked.
 *
 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being registered.
 *                              This is called with direct_mutex locked.
 *
 * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops from working on the same
 *                               function as an ops with IPMODIFY. Called
 *                               when the other ops (the one with IPMODIFY)
 *                               is being unregistered.
 *                               This is called with direct_mutex locked.
 */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};

/*
 * For most ftrace_ops_cmd values,
 * Returns:
 *        0 - Success.
 *        Negative on failure. The exact return value is dependent on the
 *        callback.
 */
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
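
/*
 * Example (hedged sketch): an ops_func implementation that accepts the
 * IPMODIFY-sharing requests above. A real implementation would check
 * whether its trampoline can honor the request; the name is hypothetical.
 *
 *	static int my_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
 *	{
 *		switch (cmd) {
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
 *			return 0;	// accept sharing
 *		case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
 *			return 0;
 *		}
 *		return -EINVAL;
 *	}
 */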

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hashes used to tell which functions the callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
	ftrace_ops_func_t		ops_func;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
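
/*
 * Usage sketch for the pair above (mirrors how the core list function walks
 * the registered ops; illustrative only):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		// act on each registered op
 *	} while_for_each_ftrace_op(op);
 */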

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free a ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
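
/*
 * Example (sketch, not a definitive recipe): registering a minimal
 * callback on a single function. "my_callback" and "my_ops" are
 * hypothetical names; error handling is elided.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on entry of every filtered function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION,
 *	};
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)do_sys_open, 0, 0);
 *	register_ftrace_function(&my_ops);
 */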

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);


int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr);
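
/*
 * Example (sketch, in the spirit of the in-tree direct-call samples):
 * attaching a direct trampoline to one call site. "my_tramp" is assumed
 * to be a hand-written arch-level trampoline that preserves the traced
 * function's registers; the name is hypothetical.
 *
 *	register_ftrace_direct((unsigned long)wake_up_process,
 *			       (unsigned long)my_tramp);
 */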

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}

/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled, and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
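
/*
 * Typical pairing (sketch): disable only across the critical section,
 * with preemption already off.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// code that must not be stack-traced
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */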
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_arch_code_modify_prepare(void);
void ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)
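
/*
 * For example (illustrative): with the layout above, testing whether a
 * record is live and referenced looks like:
 *
 *	if ((rec->flags & FTRACE_FL_ENABLED) && ftrace_rec_count(rec))
 *		// at least one ftrace_ops is tracing rec->ip
 */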

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
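
/*
 * Filter example (sketch): restrict an ops to one function before
 * registering it. "my_ops" is hypothetical; reset=0 appends to any
 * existing filter rather than replacing it.
 *
 *	unsigned char buf[] = "kfree";
 *
 *	ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 0);
 */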

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change the call site to a different ftrace caller
 *                     (e.g. to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
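
/*
 * Iteration sketch (illustrative): walking every dyn_ftrace record, as an
 * arch might do when patching all call sites.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch rec->ip
 *	}
 */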

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
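
/*
 * The read/compare/write pattern the kernel-doc above describes, as a
 * hedged arch-neutral sketch. Real implementations are arch-specific;
 * ftrace_expected_code(), ftrace_nop_code() and arch_patch_text() are
 * hypothetical helpers, not kernel APIs.
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (copy_from_kernel_nofault(cur, (void *)rec->ip, sizeof(cur)))
 *		return -EFAULT;
 *	if (memcmp(cur, ftrace_expected_code(addr), sizeof(cur)))
 *		return -EINVAL;
 *	if (arch_patch_text((void *)rec->ip, ftrace_nop_code(), sizeof(cur)))
 *		return -EPERM;
 *	return 0;
 */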

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *  Nonzero if ftrace_init_nop() should be called
 *  0       if ftrace_init_nop() should not be called
 */
#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have it defined when ftrace is not enabled, but these
 * functions may still be called. Use macros instead of inlines.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
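
/*
 * Save/restore usage sketch (illustrative): bracket a region that must
 * run with function tracing off, under the caller's own serialization.
 *
 *	int saved = __ftrace_enabled_save();
 *	// region where ftrace must stay disabled
 *	__ftrace_enabled_restore(saved);
 */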

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will emit code
 * for the CALLER_ADDR macros, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
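
/*
 * Example (sketch): a minimal fgraph_ops pairing an entry and a return
 * handler. Handler names are hypothetical; returning nonzero from the
 * entry handler tells fgraph to trace this function's return.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// trace the matching return
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the duration
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 */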

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */