1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
29849ed4dSMike Frysinger /*
39849ed4dSMike Frysinger * Ftrace header. For implementation details beyond the random comments
45fb94e9cSMauro Carvalho Chehab * scattered below, see: Documentation/trace/ftrace-design.rst
59849ed4dSMike Frysinger */
69849ed4dSMike Frysinger
716444a8aSArnaldo Carvalho de Melo #ifndef _LINUX_FTRACE_H
816444a8aSArnaldo Carvalho de Melo #define _LINUX_FTRACE_H
916444a8aSArnaldo Carvalho de Melo
100264c8c9SSteven Rostedt (VMware) #include <linux/trace_recursion.h>
110012693aSFrederic Weisbecker #include <linux/trace_clock.h>
1218bfee32SChristophe Leroy #include <linux/jump_label.h>
135601020fSFrederic Weisbecker #include <linux/kallsyms.h>
140012693aSFrederic Weisbecker #include <linux/linkage.h>
15ea4e2bc4SSteven Rostedt #include <linux/bitops.h>
16a1e2e31dSSteven Rostedt #include <linux/ptrace.h>
170012693aSFrederic Weisbecker #include <linux/ktime.h>
1821a8c466SFrederic Weisbecker #include <linux/sched.h>
190012693aSFrederic Weisbecker #include <linux/types.h>
200012693aSFrederic Weisbecker #include <linux/init.h>
210012693aSFrederic Weisbecker #include <linux/fs.h>
2216444a8aSArnaldo Carvalho de Melo
23c79a61f5SUwe Kleine-Koenig #include <asm/ftrace.h>
24c79a61f5SUwe Kleine-Koenig
252f5f6ad9SSteven Rostedt /*
262f5f6ad9SSteven Rostedt * If the arch supports passing the variable contents of
272f5f6ad9SSteven Rostedt * function_trace_op as the third parameter back from the
282f5f6ad9SSteven Rostedt * mcount call, then the arch should define this as 1.
292f5f6ad9SSteven Rostedt */
302f5f6ad9SSteven Rostedt #ifndef ARCH_SUPPORTS_FTRACE_OPS
312f5f6ad9SSteven Rostedt #define ARCH_SUPPORTS_FTRACE_OPS 0
322f5f6ad9SSteven Rostedt #endif
332f5f6ad9SSteven Rostedt
34380af29bSSteven Rostedt (Google) #ifdef CONFIG_TRACING
35380af29bSSteven Rostedt (Google) extern void ftrace_boot_snapshot(void);
36380af29bSSteven Rostedt (Google) #else
/* Stub: without CONFIG_TRACING there is no boot-time snapshot to take. */
static inline void ftrace_boot_snapshot(void) { }
38380af29bSSteven Rostedt (Google) #endif
39380af29bSSteven Rostedt (Google)
4034cdd18bSSteven Rostedt (VMware) struct ftrace_ops;
4134cdd18bSSteven Rostedt (VMware) struct ftrace_regs;
42cbad0fb2SMark Rutland struct dyn_ftrace;
439705bc70SMark Rutland
44*7d8b31b7SArnd Bergmann char *arch_ftrace_match_adjust(char *str, const char *search);
45*7d8b31b7SArnd Bergmann
46*7d8b31b7SArnd Bergmann #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
47*7d8b31b7SArnd Bergmann struct fgraph_ret_regs;
48*7d8b31b7SArnd Bergmann unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
49*7d8b31b7SArnd Bergmann #else
50*7d8b31b7SArnd Bergmann unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
51*7d8b31b7SArnd Bergmann #endif
52*7d8b31b7SArnd Bergmann
539705bc70SMark Rutland #ifdef CONFIG_FUNCTION_TRACER
54ccf3672dSSteven Rostedt /*
55ccf3672dSSteven Rostedt * If the arch's mcount caller does not support all of ftrace's
56ccf3672dSSteven Rostedt * features, then it must call an indirect function that
57f2cc020dSIngo Molnar * does. Or at least does enough to prevent any unwelcome side effects.
5834cdd18bSSteven Rostedt (VMware) *
5934cdd18bSSteven Rostedt (VMware) * Also define the function prototype that these architectures use
6034cdd18bSSteven Rostedt (VMware) * to call the ftrace_ops_list_func().
61ccf3672dSSteven Rostedt */
627544256aSSteven Rostedt (Red Hat) #if !ARCH_SUPPORTS_FTRACE_OPS
63ccf3672dSSteven Rostedt # define FTRACE_FORCE_LIST_FUNC 1
6434cdd18bSSteven Rostedt (VMware) void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
65ccf3672dSSteven Rostedt #else
66ccf3672dSSteven Rostedt # define FTRACE_FORCE_LIST_FUNC 0
6734cdd18bSSteven Rostedt (VMware) void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6834cdd18bSSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
69ccf3672dSSteven Rostedt #endif
70cbad0fb2SMark Rutland extern const struct ftrace_ops ftrace_nop_ops;
71cbad0fb2SMark Rutland extern const struct ftrace_ops ftrace_list_ops;
72cbad0fb2SMark Rutland struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
7334cdd18bSSteven Rostedt (VMware) #endif /* CONFIG_FUNCTION_TRACER */
74ccf3672dSSteven Rostedt
755f893b26SSteven Rostedt (Red Hat) /* Main tracing buffer and events set up */
765f893b26SSteven Rostedt (Red Hat) #ifdef CONFIG_TRACING
775f893b26SSteven Rostedt (Red Hat) void trace_init(void);
78e725c731SSteven Rostedt (VMware) void early_trace_init(void);
795f893b26SSteven Rostedt (Red Hat) #else
/* Stub: tracing is not built in (CONFIG_TRACING=n), nothing to set up. */
static inline void trace_init(void) { }
/* Stub: tracing is not built in (CONFIG_TRACING=n), no early setup needed. */
static inline void early_trace_init(void) { }
825f893b26SSteven Rostedt (Red Hat) #endif
83ccf3672dSSteven Rostedt
84de477254SPaul Gortmaker struct module;
8504da85b8SSteven Rostedt struct ftrace_hash;
86013bf0daSSteven Rostedt (VMware) struct ftrace_direct_func;
8704da85b8SSteven Rostedt
88aba4b5c2SSteven Rostedt (VMware) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
89aba4b5c2SSteven Rostedt (VMware) defined(CONFIG_DYNAMIC_FTRACE)
90aba4b5c2SSteven Rostedt (VMware) const char *
91aba4b5c2SSteven Rostedt (VMware) ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
92aba4b5c2SSteven Rostedt (VMware) unsigned long *off, char **modname, char *sym);
93aba4b5c2SSteven Rostedt (VMware) #else
/*
 * Stub used when module address lookup is unavailable (requires
 * FUNCTION_TRACER && MODULES && DYNAMIC_FTRACE).  Always fails the
 * lookup (NULL) so callers fall back to other kallsyms sources.
 */
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
100fc0ea795SAdrian Hunter #endif
101fc0ea795SAdrian Hunter
102fc0ea795SAdrian Hunter #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
103fc0ea795SAdrian Hunter int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
104fc0ea795SAdrian Hunter char *type, char *name,
105fc0ea795SAdrian Hunter char *module_name, int *exported);
106fc0ea795SAdrian Hunter #else
/*
 * Stub used when DYNAMIC_FTRACE is not enabled: always reports failure
 * (-1), i.e. there is no ftrace module symbol at @symnum.
 */
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
113aba4b5c2SSteven Rostedt (VMware) #endif
114aba4b5c2SSteven Rostedt (VMware)
115606576ceSSteven Rostedt #ifdef CONFIG_FUNCTION_TRACER
1163e1932adSIngo Molnar
117b0fc494fSSteven Rostedt extern int ftrace_enabled;
118b0fc494fSSteven Rostedt
11902a474caSSteven Rostedt (VMware) #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
12002a474caSSteven Rostedt (VMware)
/*
 * Without HAVE_DYNAMIC_FTRACE_WITH_ARGS the architecture saves a full
 * pt_regs, so ftrace_regs is simply a wrapper around one (see the
 * arch_ftrace_get_regs() definition below).
 */
struct ftrace_regs {
	struct pt_regs regs;
};
12402a474caSSteven Rostedt (VMware) #define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
12502a474caSSteven Rostedt (VMware)
1262860cd8aSSteven Rostedt (VMware) /*
1270ef86097SMark Rutland * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
1280ef86097SMark Rutland * if to allow setting of the instruction pointer from the ftrace_regs when
1290ef86097SMark Rutland * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
1302860cd8aSSteven Rostedt (VMware) */
1310ef86097SMark Rutland #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
13202a474caSSteven Rostedt (VMware) #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
133d19ad077SSteven Rostedt (VMware)
ftrace_get_regs(struct ftrace_regs * fregs)134d19ad077SSteven Rostedt (VMware) static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
135d19ad077SSteven Rostedt (VMware) {
136d19ad077SSteven Rostedt (VMware) if (!fregs)
137d19ad077SSteven Rostedt (VMware) return NULL;
138d19ad077SSteven Rostedt (VMware)
13902a474caSSteven Rostedt (VMware) return arch_ftrace_get_regs(fregs);
140d19ad077SSteven Rostedt (VMware) }
141d19ad077SSteven Rostedt (VMware)
14294d095ffSMark Rutland /*
14394d095ffSMark Rutland * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
14494d095ffSMark Rutland * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
14594d095ffSMark Rutland */
ftrace_regs_has_args(struct ftrace_regs * fregs)14694d095ffSMark Rutland static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
14794d095ffSMark Rutland {
14894d095ffSMark Rutland if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
14994d095ffSMark Rutland return true;
15094d095ffSMark Rutland
15194d095ffSMark Rutland return ftrace_get_regs(fregs) != NULL;
15294d095ffSMark Rutland }
15394d095ffSMark Rutland
15494d095ffSMark Rutland #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
15594d095ffSMark Rutland #define ftrace_regs_get_instruction_pointer(fregs) \
15694d095ffSMark Rutland instruction_pointer(ftrace_get_regs(fregs))
15794d095ffSMark Rutland #define ftrace_regs_get_argument(fregs, n) \
15894d095ffSMark Rutland regs_get_kernel_argument(ftrace_get_regs(fregs), n)
15994d095ffSMark Rutland #define ftrace_regs_get_stack_pointer(fregs) \
16094d095ffSMark Rutland kernel_stack_pointer(ftrace_get_regs(fregs))
16194d095ffSMark Rutland #define ftrace_regs_return_value(fregs) \
16294d095ffSMark Rutland regs_return_value(ftrace_get_regs(fregs))
16394d095ffSMark Rutland #define ftrace_regs_set_return_value(fregs, ret) \
16494d095ffSMark Rutland regs_set_return_value(ftrace_get_regs(fregs), ret)
16594d095ffSMark Rutland #define ftrace_override_function_with_return(fregs) \
16694d095ffSMark Rutland override_function_with_return(ftrace_get_regs(fregs))
16794d095ffSMark Rutland #define ftrace_regs_query_register_offset(name) \
16894d095ffSMark Rutland regs_query_register_offset(name)
16994d095ffSMark Rutland #endif
17094d095ffSMark Rutland
1712f5f6ad9SSteven Rostedt typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
172d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
17316444a8aSArnaldo Carvalho de Melo
17487354059SSteven Rostedt (Red Hat) ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
17587354059SSteven Rostedt (Red Hat)
176e248491aSJiri Olsa /*
177e248491aSJiri Olsa * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
178e248491aSJiri Olsa * set in the flags member.
179a25d036dSSteven Rostedt (VMware) * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
180f8b8be8aSMasami Hiramatsu * IPMODIFY are a kind of attribute flags which can be set only before
181f8b8be8aSMasami Hiramatsu * registering the ftrace_ops, and can not be modified while registered.
182ad61dd30SStephen Boyd * Changing those attribute flags after registering ftrace_ops will
183f8b8be8aSMasami Hiramatsu * cause unexpected results.
184e248491aSJiri Olsa *
185e248491aSJiri Olsa * ENABLED - set/unset when ftrace_ops is registered/unregistered
186e248491aSJiri Olsa * DYNAMIC - set when ftrace_ops is registered to denote dynamically
187e248491aSJiri Olsa * allocated ftrace_ops which need special care
18808f6fba5SSteven Rostedt * SAVE_REGS - The ftrace_ops wants regs saved at each function called
18908f6fba5SSteven Rostedt * and passed to the callback. If this flag is set, but the
19008f6fba5SSteven Rostedt * architecture does not support passing regs
19106aeaaeaSMasami Hiramatsu * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
19208f6fba5SSteven Rostedt * ftrace_ops will fail to register, unless the next flag
19308f6fba5SSteven Rostedt * is set.
19408f6fba5SSteven Rostedt * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
19508f6fba5SSteven Rostedt * handler can handle an arch that does not save regs
19608f6fba5SSteven Rostedt * (the handler tests if regs == NULL), then it can set
19708f6fba5SSteven Rostedt * this flag instead. It will not fail registering the ftrace_ops
19808f6fba5SSteven Rostedt * but, the regs field will be NULL if the arch does not support
19908f6fba5SSteven Rostedt * passing regs to the handler.
20008f6fba5SSteven Rostedt * Note, if this flag is set, the SAVE_REGS flag will automatically
20108f6fba5SSteven Rostedt * get set upon registering the ftrace_ops, if the arch supports it.
202a25d036dSSteven Rostedt (VMware) * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
203a25d036dSSteven Rostedt (VMware) * that the call back needs recursion protection. If it does
204a25d036dSSteven Rostedt (VMware) * not set this, then the ftrace infrastructure will assume
205a25d036dSSteven Rostedt (VMware) * that the callback can handle recursion on its own.
206395b97a3SSteven Rostedt (Red Hat) * STUB - The ftrace_ops is just a place holder.
207f04f24fbSMasami Hiramatsu * INITIALIZED - The ftrace_ops has already been initialized (first use time
208f04f24fbSMasami Hiramatsu * register_ftrace_function() is called, it will initialized the ops)
209591dffdaSSteven Rostedt (Red Hat) * DELETED - The ops are being deleted, do not let them be registered again.
210e1effa01SSteven Rostedt (Red Hat) * ADDING - The ops is in the process of being added.
211e1effa01SSteven Rostedt (Red Hat) * REMOVING - The ops is in the process of being removed.
212e1effa01SSteven Rostedt (Red Hat) * MODIFYING - The ops is in the process of changing its filter functions.
213f3bea491SSteven Rostedt (Red Hat) * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
214f3bea491SSteven Rostedt (Red Hat) * The arch specific code sets this flag when it allocated a
215f3bea491SSteven Rostedt (Red Hat) * trampoline. This lets the arch know that it can update the
216f3bea491SSteven Rostedt (Red Hat) * trampoline in case the callback function changes.
217f3bea491SSteven Rostedt (Red Hat) * The ftrace_ops trampoline can be set by the ftrace users, and
218f3bea491SSteven Rostedt (Red Hat) * in such cases the arch must not modify it. Only the arch ftrace
219f3bea491SSteven Rostedt (Red Hat) * core code should set this flag.
220f8b8be8aSMasami Hiramatsu * IPMODIFY - The ops can modify the IP register. This can only be set with
221f8b8be8aSMasami Hiramatsu * SAVE_REGS. If another ops with this flag set is already registered
222f8b8be8aSMasami Hiramatsu * for any of the functions that this ops will be registered for, then
223f8b8be8aSMasami Hiramatsu * this ops will fail to register or set_filter_ip.
224e3eea140SSteven Rostedt (Red Hat) * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
225d0ba52f1SSteven Rostedt (VMware) * RCU - Set when the ops can only be called when RCU is watching.
2268c08f0d5SSteven Rostedt (VMware) * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
2277162431dSMiroslav Benes * PERMANENT - Set when the ops is permanent and should not be affected by
2287162431dSMiroslav Benes * ftrace_enabled.
229763e34e7SSteven Rostedt (VMware) * DIRECT - Used by the direct ftrace_ops helper for direct functions
230763e34e7SSteven Rostedt (VMware) * (internal ftrace only, should not be used by others)
231e248491aSJiri Olsa */
/* Bit values for ftrace_ops.flags; semantics are described in the comment above. */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
252b848914cSSteven Rostedt
25360c89718SFlorent Revest #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
25460c89718SFlorent Revest #define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
25560c89718SFlorent Revest #else
25660c89718SFlorent Revest #define FTRACE_OPS_FL_SAVE_ARGS 0
25760c89718SFlorent Revest #endif
25860c89718SFlorent Revest
25953cd885bSSong Liu /*
26053cd885bSSong Liu * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
26153cd885bSSong Liu * to a ftrace_ops. Note, the requests may fail.
26253cd885bSSong Liu *
26353cd885bSSong Liu * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
26453cd885bSSong Liu * function as an ops with IPMODIFY. Called
26553cd885bSSong Liu * when the DIRECT ops is being registered.
26653cd885bSSong Liu * This is called with both direct_mutex and
26753cd885bSSong Liu * ftrace_lock are locked.
26853cd885bSSong Liu *
26953cd885bSSong Liu * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
27053cd885bSSong Liu * function as an ops with IPMODIFY. Called
27153cd885bSSong Liu * when the other ops (the one with IPMODIFY)
27253cd885bSSong Liu * is being registered.
27353cd885bSSong Liu * This is called with direct_mutex locked.
27453cd885bSSong Liu *
27553cd885bSSong Liu * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
27653cd885bSSong Liu * function as an ops with IPMODIFY. Called
27753cd885bSSong Liu * when the other ops (the one with IPMODIFY)
27853cd885bSSong Liu * is being unregistered.
27953cd885bSSong Liu * This is called with direct_mutex locked.
28053cd885bSSong Liu */
/* Requests delivered to an ops via its ops_func callback; see comment above. */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};
28653cd885bSSong Liu
28753cd885bSSong Liu /*
28853cd885bSSong Liu * For most ftrace_ops_cmd,
28953cd885bSSong Liu * Returns:
29053cd885bSSong Liu * 0 - Success.
29153cd885bSSong Liu * Negative on failure. The return value is dependent on the
29253cd885bSSong Liu * callback.
29353cd885bSSong Liu */
29453cd885bSSong Liu typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
29553cd885bSSong Liu
29633b7f99cSSteven Rostedt (Red Hat) #ifdef CONFIG_DYNAMIC_FTRACE
29733b7f99cSSteven Rostedt (Red Hat) /* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;	/* functions this ops must not trace */
	struct ftrace_hash __rcu	*filter_hash;	/* functions this ops traces */
	/* NOTE(review): appears to serialize updates to the hashes — confirm in ftrace.c */
	struct mutex			regex_lock;
};
30342c269c8SSteven Rostedt (VMware)
304b80f0f6cSSteven Rostedt (VMware) void ftrace_free_init_mem(void);
305aba4b5c2SSteven Rostedt (VMware) void ftrace_free_mem(struct module *mod, void *start, void *end);
30642c269c8SSteven Rostedt (VMware) #else
/*
 * !CONFIG_DYNAMIC_FTRACE stub: there is no ftrace init-section data to
 * free, but this remains the boot hook that triggers the boot snapshot.
 */
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
/* !CONFIG_DYNAMIC_FTRACE stub: no ftrace records to free for this range. */
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
31233b7f99cSSteven Rostedt (Red Hat) #endif
31333b7f99cSSteven Rostedt (Red Hat)
314b7e00a6cSSteven Rostedt (Red Hat) /*
315ba27f2bcSSteven Rostedt (Red Hat) * Note, ftrace_ops can be referenced outside of RCU protection, unless
316ba27f2bcSSteven Rostedt (Red Hat) * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
317ba27f2bcSSteven Rostedt (Red Hat) * core data, the unregistering of it will perform a scheduling on all CPUs
318ba27f2bcSSteven Rostedt (Red Hat) * to make sure that there are no more users. Depending on the load of the
319ba27f2bcSSteven Rostedt (Red Hat) * system that may take a bit of time.
320b7e00a6cSSteven Rostedt (Red Hat) *
321b7e00a6cSSteven Rostedt (Red Hat) * Any private data added must also take care not to be freed and if private
322b7e00a6cSSteven Rostedt (Red Hat) * data is added to a ftrace_ops that is in core code, the user of the
323b7e00a6cSSteven Rostedt (Red Hat) * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
324b7e00a6cSSteven Rostedt (Red Hat) */
struct ftrace_ops {
	ftrace_func_t			func;		/* callback invoked for traced functions */
	struct ftrace_ops __rcu		*next;		/* link on ftrace_ops_list */
	unsigned long			flags;		/* FTRACE_OPS_FL_* bits */
	void				*private;	/* owner data; see lifetime note above */
	/* NOTE(review): looks like the original func kept across internal swaps — confirm */
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;	/* this ops' own filter/notrace hashes */
	struct ftrace_ops_hash		*func_hash;	/* hashes currently in effect */
	struct ftrace_ops_hash		old_hash;	/* prior hashes (named for use during updates) */
	unsigned long			trampoline;	/* address of the trampoline for this ops */
	unsigned long			trampoline_size; /* size of that trampoline */
	struct list_head		list;
	ftrace_ops_func_t		ops_func;	/* handles FTRACE_OPS_CMD_* requests */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long			direct_call;	/* direct-call target address */
#endif
#endif
};
34416444a8aSArnaldo Carvalho de Melo
34559566b0bSSteven Rostedt (VMware) extern struct ftrace_ops __rcu *ftrace_ops_list;
34659566b0bSSteven Rostedt (VMware) extern struct ftrace_ops ftrace_list_end;
34759566b0bSSteven Rostedt (VMware)
34859566b0bSSteven Rostedt (VMware) /*
34940dc4a42SWei Yang * Traverse the ftrace_ops_list, invoking all entries. The reason that we
35059566b0bSSteven Rostedt (VMware) * can use rcu_dereference_raw_check() is that elements removed from this list
35159566b0bSSteven Rostedt (VMware) * are simply leaked, so there is no need to interact with a grace-period
35259566b0bSSteven Rostedt (VMware) * mechanism. The rcu_dereference_raw_check() calls are needed to handle
35340dc4a42SWei Yang * concurrent insertions into the ftrace_ops_list.
35459566b0bSSteven Rostedt (VMware) *
35559566b0bSSteven Rostedt (VMware) * Silly Alpha and silly pointer-speculation compiler optimizations!
35659566b0bSSteven Rostedt (VMware) */
35759566b0bSSteven Rostedt (VMware) #define do_for_each_ftrace_op(op, list) \
35859566b0bSSteven Rostedt (VMware) op = rcu_dereference_raw_check(list); \
35959566b0bSSteven Rostedt (VMware) do
36059566b0bSSteven Rostedt (VMware)
36159566b0bSSteven Rostedt (VMware) /*
36259566b0bSSteven Rostedt (VMware) * Optimized for just a single item in the list (as that is the normal case).
36359566b0bSSteven Rostedt (VMware) */
36459566b0bSSteven Rostedt (VMware) #define while_for_each_ftrace_op(op) \
36559566b0bSSteven Rostedt (VMware) while (likely(op = rcu_dereference_raw_check((op)->next)) && \
36659566b0bSSteven Rostedt (VMware) unlikely((op) != &ftrace_list_end))
36759566b0bSSteven Rostedt (VMware)
368e7d3737eSFrederic Weisbecker /*
369e7d3737eSFrederic Weisbecker * Type of the current tracing.
370e7d3737eSFrederic Weisbecker */
/* Whether tracing hooks function entry or function return. */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};
375e7d3737eSFrederic Weisbecker
376e7d3737eSFrederic Weisbecker /* Current tracing type, default is FTRACE_TYPE_ENTER */
377e7d3737eSFrederic Weisbecker extern enum ftrace_tracing_type_t ftrace_tracing_type;
378e7d3737eSFrederic Weisbecker
37916444a8aSArnaldo Carvalho de Melo /*
38016444a8aSArnaldo Carvalho de Melo * The ftrace_ops must be a static and should also
38116444a8aSArnaldo Carvalho de Melo * be read_mostly. These functions do modify read_mostly variables
38216444a8aSArnaldo Carvalho de Melo * so use them sparely. Never free an ftrace_op or modify the
38316444a8aSArnaldo Carvalho de Melo * next pointer after it has been registered. Even after unregistering
38416444a8aSArnaldo Carvalho de Melo * it, the next pointer may still be used internally.
38516444a8aSArnaldo Carvalho de Melo */
38616444a8aSArnaldo Carvalho de Melo int register_ftrace_function(struct ftrace_ops *ops);
38716444a8aSArnaldo Carvalho de Melo int unregister_ftrace_function(struct ftrace_ops *ops);
38816444a8aSArnaldo Carvalho de Melo
389a1e2e31dSSteven Rostedt extern void ftrace_stub(unsigned long a0, unsigned long a1,
390d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
39116444a8aSArnaldo Carvalho de Melo
392bed0d9a5SJiri Olsa
393bed0d9a5SJiri Olsa int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
394606576ceSSteven Rostedt #else /* !CONFIG_FUNCTION_TRACER */
3954dbf6bc2SSteven Rostedt /*
3964dbf6bc2SSteven Rostedt * (un)register_ftrace_function must be a macro since the ops parameter
3974dbf6bc2SSteven Rostedt * must not be evaluated.
3984dbf6bc2SSteven Rostedt */
3994dbf6bc2SSteven Rostedt #define register_ftrace_function(ops) ({ 0; })
4004dbf6bc2SSteven Rostedt #define unregister_ftrace_function(ops) ({ 0; })
/* Stub: nothing to shut down when the function tracer is not built in. */
static inline void ftrace_kill(void) { }
/* Stub: no ftrace init memory exists without CONFIG_FUNCTION_TRACER. */
static inline void ftrace_free_init_mem(void) { }
/* Stub: nothing to free without CONFIG_FUNCTION_TRACER. */
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
/*
 * Stub: batch symbol lookup requires the function tracer; report
 * -EOPNOTSUPP so callers can fail gracefully.
 */
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
408606576ceSSteven Rostedt #endif /* CONFIG_FUNCTION_TRACER */
409352ad25aSSteven Rostedt
/* An entry in an ftrace_hash, keyed by function address. */
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;	/* hash key: function address */
	unsigned long direct;	/* for direct lookup only */
};
415ea806eb3SSteven Rostedt (VMware)
416763e34e7SSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
417a3ad1a7eSSteven Rostedt (VMware) extern int ftrace_direct_func_count;
418ff205766SAlexei Starovoitov unsigned long ftrace_find_rec_direct(unsigned long ip);
419da8bdfbdSFlorent Revest int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
420da8bdfbdSFlorent Revest int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
42159495740SFlorent Revest bool free_filters);
422da8bdfbdSFlorent Revest int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
423da8bdfbdSFlorent Revest int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
424ccf5a89eSJiri Olsa
425fee86a4eSMark Rutland void ftrace_stub_direct_tramp(void);
426f64dd462SJiri Olsa
427763e34e7SSteven Rostedt (VMware) #else
428f64dd462SJiri Olsa struct ftrace_ops;
429a3ad1a7eSSteven Rostedt (VMware) # define ftrace_direct_func_count 0
/* Stub: no direct-call records without DYNAMIC_FTRACE_WITH_DIRECT_CALLS. */
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
/* Stub: direct calls unsupported in this config; always -ENODEV. */
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
/* Stub: direct calls unsupported in this config; always -ENODEV. */
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
					   bool free_filters)
{
	return -ENODEV;
}
/* Stub: direct calls unsupported in this config; always -ENODEV. */
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
/* Stub: direct calls unsupported in this config; always -ENODEV. */
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
451763e34e7SSteven Rostedt (VMware)
452763e34e7SSteven Rostedt (VMware) /*
453763e34e7SSteven Rostedt (VMware) * This must be implemented by the architecture.
454763e34e7SSteven Rostedt (VMware) * It is the way the ftrace direct_ops helper, when called
455763e34e7SSteven Rostedt (VMware) * via ftrace (because there's other callbacks besides the
456763e34e7SSteven Rostedt (VMware) * direct call), can inform the architecture's trampoline that this
457763e34e7SSteven Rostedt (VMware) * routine has a direct caller, and what the caller is.
458562955feSSteven Rostedt (VMware) *
459562955feSSteven Rostedt (VMware) * For example, in x86, it returns the direct caller
460562955feSSteven Rostedt (VMware) * callback function via the regs->orig_ax parameter.
461562955feSSteven Rostedt (VMware) * Then in the ftrace trampoline, if this is set, it makes
462562955feSSteven Rostedt (VMware) * the return from the trampoline jump to the direct caller
463562955feSSteven Rostedt (VMware) * instead of going back to the function it just traced.
464763e34e7SSteven Rostedt (VMware) */
/* Stub: no direct-call trampoline support in this config (see comment above). */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
4679705bc70SMark Rutland #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
468763e34e7SSteven Rostedt (VMware)
469f38f1d2aSSteven Rostedt #ifdef CONFIG_STACK_TRACER
470bb99d8ccSAKASHI Takahiro
471f38f1d2aSSteven Rostedt extern int stack_tracer_enabled;
4723d9a8072SThomas Gleixner
4737ff0d449SChristoph Hellwig int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
4747ff0d449SChristoph Hellwig size_t *lenp, loff_t *ppos);
4755367278cSSteven Rostedt (VMware)
4768aaf1ee7SSteven Rostedt (VMware) /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
4778aaf1ee7SSteven Rostedt (VMware) DECLARE_PER_CPU(int, disable_stack_tracer);
4788aaf1ee7SSteven Rostedt (VMware)
4798aaf1ee7SSteven Rostedt (VMware) /**
4808aaf1ee7SSteven Rostedt (VMware) * stack_tracer_disable - temporarily disable the stack tracer
4818aaf1ee7SSteven Rostedt (VMware) *
 * There are a few locations (namely in RCU) where stack tracing
4838aaf1ee7SSteven Rostedt (VMware) * cannot be executed. This function is used to disable stack
4848aaf1ee7SSteven Rostedt (VMware) * tracing during those critical sections.
4858aaf1ee7SSteven Rostedt (VMware) *
4868aaf1ee7SSteven Rostedt (VMware) * This function must be called with preemption or interrupts
4878aaf1ee7SSteven Rostedt (VMware) * disabled and stack_tracer_enable() must be called shortly after
4888aaf1ee7SSteven Rostedt (VMware) * while preemption or interrupts are still disabled.
4898aaf1ee7SSteven Rostedt (VMware) */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	/* Per-CPU counter: nests, and must be paired with stack_tracer_enable(). */
	this_cpu_inc(disable_stack_tracer);
}
4978aaf1ee7SSteven Rostedt (VMware)
4988aaf1ee7SSteven Rostedt (VMware) /**
4998aaf1ee7SSteven Rostedt (VMware) * stack_tracer_enable - re-enable the stack tracer
5008aaf1ee7SSteven Rostedt (VMware) *
5018aaf1ee7SSteven Rostedt (VMware) * After stack_tracer_disable() is called, stack_tracer_enable()
5028aaf1ee7SSteven Rostedt (VMware) * must be called shortly afterward.
5038aaf1ee7SSteven Rostedt (VMware) */
static inline void stack_tracer_enable(void)
{
	/* Same context requirements as stack_tracer_disable() */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
5105367278cSSteven Rostedt (VMware) #else
/* CONFIG_STACK_TRACER=n: stack tracing never runs, nothing to disable. */
static inline void stack_tracer_disable(void) { }
/* CONFIG_STACK_TRACER=n: counterpart stub for stack_tracer_disable(). */
static inline void stack_tracer_enable(void) { }
513f38f1d2aSSteven Rostedt #endif
514f38f1d2aSSteven Rostedt
5153d083395SSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
51631e88909SSteven Rostedt
5173a2bfec0SLi kunyu void ftrace_arch_code_modify_prepare(void);
5183a2bfec0SLi kunyu void ftrace_arch_code_modify_post_process(void);
519000ab691SSteven Rostedt
/*
 * Categorizes a failed call-site modification for ftrace_bug() reporting.
 * NOTE(review): per-value meanings inferred from the names (init of a site,
 * converting to nop, to call, or updating a call) — confirm against the
 * ftrace_bug() implementation.
 */
enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
52702a392a0SSteven Rostedt (Red Hat) extern enum ftrace_bug_type ftrace_bug_type;
52802a392a0SSteven Rostedt (Red Hat)
529b05086c7SSteven Rostedt (Red Hat) /*
530b05086c7SSteven Rostedt (Red Hat) * Archs can set this to point to a variable that holds the value that was
531b05086c7SSteven Rostedt (Red Hat) * expected at the call site before calling ftrace_bug().
532b05086c7SSteven Rostedt (Red Hat) */
533b05086c7SSteven Rostedt (Red Hat) extern const void *ftrace_expected;
534b05086c7SSteven Rostedt (Red Hat)
5354fd3279bSSteven Rostedt (Red Hat) void ftrace_bug(int err, struct dyn_ftrace *rec);
536c88fd863SSteven Rostedt
537809dcf29SSteven Rostedt struct seq_file;
538809dcf29SSteven Rostedt
539d88471cbSSasha Levin extern int ftrace_text_reserved(const void *start, const void *end);
5402cfa1978SMasami Hiramatsu
5416be7fa3cSSteven Rostedt (VMware) struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
5426be7fa3cSSteven Rostedt (VMware)
543aec0be2dSSteven Rostedt (Red Hat) bool is_ftrace_trampoline(unsigned long addr);
544aec0be2dSSteven Rostedt (Red Hat)
54508f6fba5SSteven Rostedt /*
54608f6fba5SSteven Rostedt * The dyn_ftrace record's flags field is split into two parts.
54708f6fba5SSteven Rostedt * the first part which is '0-FTRACE_REF_MAX' is a counter of
54808f6fba5SSteven Rostedt * the number of callbacks that have registered the function that
54908f6fba5SSteven Rostedt * the dyn_ftrace descriptor represents.
55008f6fba5SSteven Rostedt *
55108f6fba5SSteven Rostedt * The second part is a mask:
55208f6fba5SSteven Rostedt * ENABLED - the function is being traced
55308f6fba5SSteven Rostedt * REGS - the record wants the function to save regs
55408f6fba5SSteven Rostedt * REGS_EN - the function is set up to save regs.
555f8b8be8aSMasami Hiramatsu * IPMODIFY - the record allows for the IP address to be changed.
556b7ffffbbSSteven Rostedt (Red Hat) * DISABLED - the record is not ready to be touched yet
557763e34e7SSteven Rostedt (VMware) * DIRECT - there is a direct function to call
558cbad0fb2SMark Rutland * CALL_OPS - the record can use callsite-specific ops
559cbad0fb2SMark Rutland * CALL_OPS_EN - the function is set up to use callsite-specific ops
560e11b521aSSteven Rostedt (Google) * TOUCHED - A callback was added since boot up
5616ce2c04fSSteven Rostedt (Google) * MODIFIED - The function had IPMODIFY or DIRECT attached to it
56208f6fba5SSteven Rostedt *
56308f6fba5SSteven Rostedt * When a new ftrace_ops is registered and wants a function to save
56402dae28fSWei Yang * pt_regs, the rec->flags REGS is set. When the function has been
56508f6fba5SSteven Rostedt * set up to save regs, the REG_EN flag is set. Once a function
56608f6fba5SSteven Rostedt * starts saving regs it will do so until all ftrace_ops are removed
56708f6fba5SSteven Rostedt * from tracing that function.
56808f6fba5SSteven Rostedt */
/* dyn_ftrace->flags mask bits; see the comment block above for details. */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),	/* function is being traced */
	FTRACE_FL_REGS		= (1UL << 30),	/* record wants pt_regs saved */
	FTRACE_FL_REGS_EN	= (1UL << 29),	/* function is set up to save regs */
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),	/* record allows the IP to be changed */
	FTRACE_FL_DISABLED	= (1UL << 25),	/* record is not ready to be touched */
	FTRACE_FL_DIRECT	= (1UL << 24),	/* there is a direct function to call */
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
	FTRACE_FL_CALL_OPS	= (1UL << 22),	/* record can use callsite-specific ops */
	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),	/* set up for callsite-specific ops */
	FTRACE_FL_TOUCHED	= (1UL << 20),	/* a callback was added since boot up */
	FTRACE_FL_MODIFIED	= (1UL << 19),	/* had IPMODIFY or DIRECT attached */
};
5843c1720f0SSteven Rostedt
5856ce2c04fSSteven Rostedt (Google) #define FTRACE_REF_MAX_SHIFT 19
586cf2cb0b2SSteven Rostedt (Red Hat) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
587ed926f9bSSteven Rostedt
58802dae28fSWei Yang #define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
5890376bde1SSteven Rostedt (Red Hat)
/* One record per patchable call site; flags layout is described above. */
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags; /* ref count (low bits) + FTRACE_FL_* mask */
	struct dyn_arch_ftrace	arch; /* architecture-specific per-site data */
};
5953d083395SSteven Rostedt
596647664eaSMasami Hiramatsu int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
597647664eaSMasami Hiramatsu int remove, int reset);
5984f554e95SJiri Olsa int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5994f554e95SJiri Olsa unsigned int cnt, int remove, int reset);
600ac483c44SJiri Olsa int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
601936e074bSSteven Rostedt int len, int reset);
602ac483c44SJiri Olsa int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
603936e074bSSteven Rostedt int len, int reset);
604936e074bSSteven Rostedt void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
605936e074bSSteven Rostedt void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
6065500fa51SJiri Olsa void ftrace_free_filter(struct ftrace_ops *ops);
607d032ae89SJoel Fernandes void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
608e1c08bddSSteven Rostedt
609c88fd863SSteven Rostedt enum {
610c88fd863SSteven Rostedt FTRACE_UPDATE_CALLS = (1 << 0),
611c88fd863SSteven Rostedt FTRACE_DISABLE_CALLS = (1 << 1),
612c88fd863SSteven Rostedt FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
613c88fd863SSteven Rostedt FTRACE_START_FUNC_RET = (1 << 3),
614c88fd863SSteven Rostedt FTRACE_STOP_FUNC_RET = (1 << 4),
615a0572f68SSteven Rostedt (VMware) FTRACE_MAY_SLEEP = (1 << 5),
616c88fd863SSteven Rostedt };
617c88fd863SSteven Rostedt
61808f6fba5SSteven Rostedt /*
61908f6fba5SSteven Rostedt * The FTRACE_UPDATE_* enum is used to pass information back
62008f6fba5SSteven Rostedt * from the ftrace_update_record() and ftrace_test_record()
62108f6fba5SSteven Rostedt * functions. These are called by the code update routines
62208f6fba5SSteven Rostedt * to find out what is to be done for a given function.
62308f6fba5SSteven Rostedt *
62408f6fba5SSteven Rostedt * IGNORE - The function is already what we want it to be
62508f6fba5SSteven Rostedt * MAKE_CALL - Start tracing the function
62608f6fba5SSteven Rostedt * MODIFY_CALL - Stop saving regs for the function
62708f6fba5SSteven Rostedt * MAKE_NOP - Stop tracing the function
62808f6fba5SSteven Rostedt */
629c88fd863SSteven Rostedt enum {
630c88fd863SSteven Rostedt FTRACE_UPDATE_IGNORE,
631c88fd863SSteven Rostedt FTRACE_UPDATE_MAKE_CALL,
63208f6fba5SSteven Rostedt FTRACE_UPDATE_MODIFY_CALL,
633c88fd863SSteven Rostedt FTRACE_UPDATE_MAKE_NOP,
634c88fd863SSteven Rostedt };
635c88fd863SSteven Rostedt
636fc13cb0cSSteven Rostedt enum {
637fc13cb0cSSteven Rostedt FTRACE_ITER_FILTER = (1 << 0),
638fc13cb0cSSteven Rostedt FTRACE_ITER_NOTRACE = (1 << 1),
639fc13cb0cSSteven Rostedt FTRACE_ITER_PRINTALL = (1 << 2),
640eee8ded1SSteven Rostedt (VMware) FTRACE_ITER_DO_PROBES = (1 << 3),
641eee8ded1SSteven Rostedt (VMware) FTRACE_ITER_PROBE = (1 << 4),
6425985ea8bSSteven Rostedt (VMware) FTRACE_ITER_MOD = (1 << 5),
6435985ea8bSSteven Rostedt (VMware) FTRACE_ITER_ENABLED = (1 << 6),
644e11b521aSSteven Rostedt (Google) FTRACE_ITER_TOUCHED = (1 << 7),
64583f74441SJiri Olsa FTRACE_ITER_ADDRS = (1 << 8),
646fc13cb0cSSteven Rostedt };
647fc13cb0cSSteven Rostedt
648c88fd863SSteven Rostedt void arch_ftrace_update_code(int command);
64989f579ceSYi Wang void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
65089f579ceSYi Wang void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
65189f579ceSYi Wang void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
652c88fd863SSteven Rostedt
653c88fd863SSteven Rostedt struct ftrace_rec_iter;
654c88fd863SSteven Rostedt
655c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_start(void);
656c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
657c88fd863SSteven Rostedt struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
658c88fd863SSteven Rostedt
65908d636b6SSteven Rostedt #define for_ftrace_rec_iter(iter) \
66008d636b6SSteven Rostedt for (iter = ftrace_rec_iter_start(); \
66108d636b6SSteven Rostedt iter; \
66208d636b6SSteven Rostedt iter = ftrace_rec_iter_next(iter))
66308d636b6SSteven Rostedt
66408d636b6SSteven Rostedt
6657375dca1SSteven Rostedt (VMware) int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
6667375dca1SSteven Rostedt (VMware) int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
667c88fd863SSteven Rostedt void ftrace_run_stop_machine(int command);
668f0cf973aSSteven Rostedt unsigned long ftrace_location(unsigned long ip);
66904cf31a7SMichael Ellerman unsigned long ftrace_location_range(unsigned long start, unsigned long end);
6707413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
6717413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
672c88fd863SSteven Rostedt
673c88fd863SSteven Rostedt extern ftrace_func_t ftrace_trace_function;
674c88fd863SSteven Rostedt
675fc13cb0cSSteven Rostedt int ftrace_regex_open(struct ftrace_ops *ops, int flag,
676fc13cb0cSSteven Rostedt struct inode *inode, struct file *file);
677fc13cb0cSSteven Rostedt ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
678fc13cb0cSSteven Rostedt size_t cnt, loff_t *ppos);
679fc13cb0cSSteven Rostedt ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
680fc13cb0cSSteven Rostedt size_t cnt, loff_t *ppos);
681fc13cb0cSSteven Rostedt int ftrace_regex_release(struct inode *inode, struct file *file);
682fc13cb0cSSteven Rostedt
6832a85a37fSSteven Rostedt void __init
6842a85a37fSSteven Rostedt ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
6852a85a37fSSteven Rostedt
6863d083395SSteven Rostedt /* defined in arch */
6873a36cb11SJiri Slaby extern int ftrace_dyn_arch_init(void);
688e4f5d544SSteven Rostedt extern void ftrace_replace_code(int enable);
689d61f82d0SSteven Rostedt extern int ftrace_update_ftrace_func(ftrace_func_t func);
690d61f82d0SSteven Rostedt extern void ftrace_caller(void);
69108f6fba5SSteven Rostedt extern void ftrace_regs_caller(void);
692d61f82d0SSteven Rostedt extern void ftrace_call(void);
69308f6fba5SSteven Rostedt extern void ftrace_regs_call(void);
694d61f82d0SSteven Rostedt extern void mcount_call(void);
695f0001207SShaohua Li
6968ed3e2cfSSteven Rostedt void ftrace_modify_all_code(int command);
6978ed3e2cfSSteven Rostedt
698f0001207SShaohua Li #ifndef FTRACE_ADDR
699f0001207SShaohua Li #define FTRACE_ADDR ((unsigned long)ftrace_caller)
700f0001207SShaohua Li #endif
70108f6fba5SSteven Rostedt
70279922b80SSteven Rostedt (Red Hat) #ifndef FTRACE_GRAPH_ADDR
70379922b80SSteven Rostedt (Red Hat) #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
70479922b80SSteven Rostedt (Red Hat) #endif
70579922b80SSteven Rostedt (Red Hat)
70608f6fba5SSteven Rostedt #ifndef FTRACE_REGS_ADDR
70706aeaaeaSMasami Hiramatsu #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
70808f6fba5SSteven Rostedt # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
70908f6fba5SSteven Rostedt #else
71008f6fba5SSteven Rostedt # define FTRACE_REGS_ADDR FTRACE_ADDR
71108f6fba5SSteven Rostedt #endif
71208f6fba5SSteven Rostedt #endif
71308f6fba5SSteven Rostedt
714646d7043SSteven Rostedt (Red Hat) /*
715646d7043SSteven Rostedt (Red Hat) * If an arch would like functions that are only traced
716646d7043SSteven Rostedt (Red Hat) * by the function graph tracer to jump directly to its own
717646d7043SSteven Rostedt (Red Hat) * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
718646d7043SSteven Rostedt (Red Hat) * to be that address to jump to.
719646d7043SSteven Rostedt (Red Hat) */
720646d7043SSteven Rostedt (Red Hat) #ifndef FTRACE_GRAPH_TRAMP_ADDR
721646d7043SSteven Rostedt (Red Hat) #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
722646d7043SSteven Rostedt (Red Hat) #endif
723646d7043SSteven Rostedt (Red Hat)
724fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
725fb52607aSFrederic Weisbecker extern void ftrace_graph_caller(void);
7265a45cfe1SSteven Rostedt extern int ftrace_enable_ftrace_graph_caller(void);
7275a45cfe1SSteven Rostedt extern int ftrace_disable_ftrace_graph_caller(void);
7285a45cfe1SSteven Rostedt #else
/* !CONFIG_FUNCTION_GRAPH_TRACER: no graph caller to patch in; returns 0. */
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
/* !CONFIG_FUNCTION_GRAPH_TRACER: counterpart of the enable stub above. */
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
731e7d3737eSFrederic Weisbecker #endif
732ad90c0e3SSteven Rostedt
733593eb8a2SSteven Rostedt /**
73457794a9dSWenji Huang * ftrace_make_nop - convert code into nop
73531e88909SSteven Rostedt * @mod: module structure if called by module load initialization
736fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
73731e88909SSteven Rostedt * @addr: the address that the call site should be calling
738593eb8a2SSteven Rostedt *
739593eb8a2SSteven Rostedt * This is a very sensitive operation and great care needs
740593eb8a2SSteven Rostedt * to be taken by the arch. The operation should carefully
741593eb8a2SSteven Rostedt * read the location, check to see if what is read is indeed
742593eb8a2SSteven Rostedt * what we expect it to be, and then on success of the compare,
743593eb8a2SSteven Rostedt * it should write to the location.
744593eb8a2SSteven Rostedt *
74531e88909SSteven Rostedt * The code segment at @rec->ip should be a caller to @addr
74631e88909SSteven Rostedt *
747593eb8a2SSteven Rostedt * Return must be:
748593eb8a2SSteven Rostedt * 0 on success
749593eb8a2SSteven Rostedt * -EFAULT on error reading the location
750593eb8a2SSteven Rostedt * -EINVAL on a failed compare of the contents
751593eb8a2SSteven Rostedt * -EPERM on error writing to the location
752593eb8a2SSteven Rostedt * Any other value will be considered a failure.
753593eb8a2SSteven Rostedt */
75431e88909SSteven Rostedt extern int ftrace_make_nop(struct module *mod,
75531e88909SSteven Rostedt struct dyn_ftrace *rec, unsigned long addr);
75631e88909SSteven Rostedt
75767ccddf8SIlya Leoshkevich /**
75867ccddf8SIlya Leoshkevich * ftrace_need_init_nop - return whether nop call sites should be initialized
75967ccddf8SIlya Leoshkevich *
76067ccddf8SIlya Leoshkevich * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
76167ccddf8SIlya Leoshkevich * need to call ftrace_init_nop() if the code is built with that flag.
76267ccddf8SIlya Leoshkevich * Architectures where this is not always the case may define their own
76367ccddf8SIlya Leoshkevich * condition.
76467ccddf8SIlya Leoshkevich *
76567ccddf8SIlya Leoshkevich * Return must be:
76667ccddf8SIlya Leoshkevich * 0 if ftrace_init_nop() should be called
76767ccddf8SIlya Leoshkevich * Nonzero if ftrace_init_nop() should not be called
76867ccddf8SIlya Leoshkevich */
76967ccddf8SIlya Leoshkevich
77067ccddf8SIlya Leoshkevich #ifndef ftrace_need_init_nop
77167ccddf8SIlya Leoshkevich #define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
77267ccddf8SIlya Leoshkevich #endif
773fbf6c73cSMark Rutland
774fbf6c73cSMark Rutland /**
775fbf6c73cSMark Rutland * ftrace_init_nop - initialize a nop call site
776fbf6c73cSMark Rutland * @mod: module structure if called by module load initialization
777fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
778fbf6c73cSMark Rutland *
779fbf6c73cSMark Rutland * This is a very sensitive operation and great care needs
780fbf6c73cSMark Rutland * to be taken by the arch. The operation should carefully
781fbf6c73cSMark Rutland * read the location, check to see if what is read is indeed
782fbf6c73cSMark Rutland * what we expect it to be, and then on success of the compare,
783fbf6c73cSMark Rutland * it should write to the location.
784fbf6c73cSMark Rutland *
785fbf6c73cSMark Rutland * The code segment at @rec->ip should contain the contents created by
786fbf6c73cSMark Rutland * the compiler
787fbf6c73cSMark Rutland *
788fbf6c73cSMark Rutland * Return must be:
789fbf6c73cSMark Rutland * 0 on success
790fbf6c73cSMark Rutland * -EFAULT on error reading the location
791fbf6c73cSMark Rutland * -EINVAL on a failed compare of the contents
792fbf6c73cSMark Rutland * -EPERM on error writing to the location
793fbf6c73cSMark Rutland * Any other value will be considered a failure.
794fbf6c73cSMark Rutland */
795fbf6c73cSMark Rutland #ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	/* Default: initialize a call site by patching the mcount call to a nop. */
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
800fbf6c73cSMark Rutland #endif
801fbf6c73cSMark Rutland
80231e88909SSteven Rostedt /**
80331e88909SSteven Rostedt * ftrace_make_call - convert a nop call site into a call to addr
804fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
80531e88909SSteven Rostedt * @addr: the address that the call site should call
80631e88909SSteven Rostedt *
80731e88909SSteven Rostedt * This is a very sensitive operation and great care needs
80831e88909SSteven Rostedt * to be taken by the arch. The operation should carefully
80931e88909SSteven Rostedt * read the location, check to see if what is read is indeed
81031e88909SSteven Rostedt * what we expect it to be, and then on success of the compare,
81131e88909SSteven Rostedt * it should write to the location.
81231e88909SSteven Rostedt *
81331e88909SSteven Rostedt * The code segment at @rec->ip should be a nop
81431e88909SSteven Rostedt *
81531e88909SSteven Rostedt * Return must be:
81631e88909SSteven Rostedt * 0 on success
81731e88909SSteven Rostedt * -EFAULT on error reading the location
81831e88909SSteven Rostedt * -EINVAL on a failed compare of the contents
81931e88909SSteven Rostedt * -EPERM on error writing to the location
82031e88909SSteven Rostedt * Any other value will be considered a failure.
82131e88909SSteven Rostedt */
82231e88909SSteven Rostedt extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
82331e88909SSteven Rostedt
824cbad0fb2SMark Rutland #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
825cbad0fb2SMark Rutland defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
82608f6fba5SSteven Rostedt /**
82708f6fba5SSteven Rostedt * ftrace_modify_call - convert from one addr to another (no nop)
828fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
82908f6fba5SSteven Rostedt * @old_addr: the address expected to be currently called to
83008f6fba5SSteven Rostedt * @addr: the address to change to
83108f6fba5SSteven Rostedt *
83208f6fba5SSteven Rostedt * This is a very sensitive operation and great care needs
83308f6fba5SSteven Rostedt * to be taken by the arch. The operation should carefully
83408f6fba5SSteven Rostedt * read the location, check to see if what is read is indeed
83508f6fba5SSteven Rostedt * what we expect it to be, and then on success of the compare,
83608f6fba5SSteven Rostedt * it should write to the location.
83708f6fba5SSteven Rostedt *
838cbad0fb2SMark Rutland * When using call ops, this is called when the associated ops change, even
839cbad0fb2SMark Rutland * when (addr == old_addr).
840cbad0fb2SMark Rutland *
84108f6fba5SSteven Rostedt * The code segment at @rec->ip should be a caller to @old_addr
84208f6fba5SSteven Rostedt *
84308f6fba5SSteven Rostedt * Return must be:
84408f6fba5SSteven Rostedt * 0 on success
84508f6fba5SSteven Rostedt * -EFAULT on error reading the location
84608f6fba5SSteven Rostedt * -EINVAL on a failed compare of the contents
84708f6fba5SSteven Rostedt * -EPERM on error writing to the location
84808f6fba5SSteven Rostedt * Any other value will be considered a failure.
84908f6fba5SSteven Rostedt */
85008f6fba5SSteven Rostedt extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
85108f6fba5SSteven Rostedt unsigned long addr);
85208f6fba5SSteven Rostedt #else
85308f6fba5SSteven Rostedt /* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	/* Unreachable without WITH_REGS/WITH_CALL_OPS; fail loudly if hit. */
	return -EINVAL;
}
85908f6fba5SSteven Rostedt #endif
86008f6fba5SSteven Rostedt
861ecea656dSAbhishek Sagar extern int skip_trace(unsigned long ip);
862a949ae56SSteven Rostedt (Red Hat) extern void ftrace_module_init(struct module *mod);
8637dcd182bSJessica Yu extern void ftrace_module_enable(struct module *mod);
864049fb9bdSSteven Rostedt (Red Hat) extern void ftrace_release_mod(struct module *mod);
8654dc93676SSteven Rostedt #else /* CONFIG_DYNAMIC_FTRACE */
/* !CONFIG_DYNAMIC_FTRACE: no patched call sites, so nothing is skipped. */
static inline int skip_trace(unsigned long ip) { return 0; }
/* !CONFIG_DYNAMIC_FTRACE: no mcount records to set up at module load. */
static inline void ftrace_module_init(struct module *mod) { }
/* !CONFIG_DYNAMIC_FTRACE stub. */
static inline void ftrace_module_enable(struct module *mod) { }
/* !CONFIG_DYNAMIC_FTRACE: no per-module records to free. */
static inline void ftrace_release_mod(struct module *mod) { }
/* Without dynamic ftrace, ftrace reserves no text ranges. */
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
/* Without dynamic ftrace there are no known mcount call sites. */
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}
878fc13cb0cSSteven Rostedt
879fc13cb0cSSteven Rostedt /*
880fc13cb0cSSteven Rostedt * Again users of functions that have ftrace_ops may not
881fc13cb0cSSteven Rostedt * have them defined when ftrace is not enabled, but these
882fc13cb0cSSteven Rostedt * functions may still be called. Use a macro instead of inline.
883fc13cb0cSSteven Rostedt */
884fc13cb0cSSteven Rostedt #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
88596de37b6SSteven Rostedt #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
886647664eaSMasami Hiramatsu #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
8874f554e95SJiri Olsa #define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
8885500fa51SJiri Olsa #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
8895500fa51SJiri Olsa #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
8905500fa51SJiri Olsa #define ftrace_free_filter(ops) do { } while (0)
891d032ae89SJoel Fernandes #define ftrace_ops_set_global_filter(ops) do { } while (0)
892fc13cb0cSSteven Rostedt
/* Filter files do not exist without dynamic ftrace. */
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
/* Notrace files do not exist without dynamic ftrace. */
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
/* The regex filter interface is unavailable without dynamic ftrace. */
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
899aec0be2dSSteven Rostedt (Red Hat)
/* No dynamic ftrace means no ftrace-allocated trampolines exist. */
static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
904ecea656dSAbhishek Sagar #endif /* CONFIG_DYNAMIC_FTRACE */
905352ad25aSSteven Rostedt
9060c0593b4SSteven Rostedt (VMware) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9070c0593b4SSteven Rostedt (VMware) #ifndef ftrace_graph_func
9080c0593b4SSteven Rostedt (VMware) #define ftrace_graph_func ftrace_stub
9090c0593b4SSteven Rostedt (VMware) #define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
9100c0593b4SSteven Rostedt (VMware) #else
9110c0593b4SSteven Rostedt (VMware) #define FTRACE_OPS_GRAPH_STUB 0
9120c0593b4SSteven Rostedt (VMware) #endif
9130c0593b4SSteven Rostedt (VMware) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
9140c0593b4SSteven Rostedt (VMware)
915aeaee8a2SIngo Molnar /* totally disable ftrace - can not re-enable after this */
916aeaee8a2SIngo Molnar void ftrace_kill(void);
917aeaee8a2SIngo Molnar
/*
 * Stop function tracing by clearing ftrace_enabled; compiles away when
 * CONFIG_FUNCTION_TRACER is off.
 */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
924f43fdad8SIngo Molnar
92537002735SHuang Ying /*
92637002735SHuang Ying * Ftrace disable/restore without lock. Some synchronization mechanism
9279bdeb7b5SHuang Ying * must be used to prevent ftrace_enabled to be changed between
92837002735SHuang Ying * disable/restore.
92937002735SHuang Ying */
/*
 * Save the current ftrace_enabled value and clear it.  The caller must
 * provide synchronization (see the comment above) and later hand the
 * returned value to __ftrace_enabled_restore().
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}
9409bdeb7b5SHuang Ying
/* Restore the value previously returned by __ftrace_enabled_save(). */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
9479bdeb7b5SHuang Ying
948eed542d6SAKASHI Takahiro /* All archs should have this, but we define it for consistency */
949eed542d6SAKASHI Takahiro #ifndef ftrace_return_address0
950eed542d6SAKASHI Takahiro # define ftrace_return_address0 __builtin_return_address(0)
951352ad25aSSteven Rostedt #endif
952eed542d6SAKASHI Takahiro
953eed542d6SAKASHI Takahiro /* Archs may use other ways for ADDR1 and beyond */
954eed542d6SAKASHI Takahiro #ifndef ftrace_return_address
955eed542d6SAKASHI Takahiro # ifdef CONFIG_FRAME_POINTER
956eed542d6SAKASHI Takahiro # define ftrace_return_address(n) __builtin_return_address(n)
957eed542d6SAKASHI Takahiro # else
958eed542d6SAKASHI Takahiro # define ftrace_return_address(n) 0UL
959eed542d6SAKASHI Takahiro # endif
960eed542d6SAKASHI Takahiro #endif
961eed542d6SAKASHI Takahiro
962eed542d6SAKASHI Takahiro #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
963eed542d6SAKASHI Takahiro #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
964eed542d6SAKASHI Takahiro #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
965eed542d6SAKASHI Takahiro #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
966eed542d6SAKASHI Takahiro #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
967eed542d6SAKASHI Takahiro #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
968eed542d6SAKASHI Takahiro #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
969352ad25aSSteven Rostedt
get_lock_parent_ip(void)970ea65b418SJohn Keeping static __always_inline unsigned long get_lock_parent_ip(void)
971f904f582SSebastian Andrzej Siewior {
972f904f582SSebastian Andrzej Siewior unsigned long addr = CALLER_ADDR0;
973f904f582SSebastian Andrzej Siewior
974f904f582SSebastian Andrzej Siewior if (!in_lock_functions(addr))
975f904f582SSebastian Andrzej Siewior return addr;
976f904f582SSebastian Andrzej Siewior addr = CALLER_ADDR1;
977f904f582SSebastian Andrzej Siewior if (!in_lock_functions(addr))
978f904f582SSebastian Andrzej Siewior return addr;
979f904f582SSebastian Andrzej Siewior return CALLER_ADDR2;
980f904f582SSebastian Andrzej Siewior }
981f904f582SSebastian Andrzej Siewior
982c3bc8fd6SJoel Fernandes (Google) #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
983489f1396SIngo Molnar extern void trace_preempt_on(unsigned long a0, unsigned long a1);
984489f1396SIngo Molnar extern void trace_preempt_off(unsigned long a0, unsigned long a1);
9856cd8a4bbSSteven Rostedt #else
986b02ee9a3SMinho Ban /*
987b02ee9a3SMinho Ban * Use defines instead of static inlines because some arches will make code out
988b02ee9a3SMinho Ban * of the CALLER_ADDR, when we really want these to be a real nop.
989b02ee9a3SMinho Ban */
990b02ee9a3SMinho Ban # define trace_preempt_on(a0, a1) do { } while (0)
991b02ee9a3SMinho Ban # define trace_preempt_off(a0, a1) do { } while (0)
9926cd8a4bbSSteven Rostedt #endif
9936cd8a4bbSSteven Rostedt
99468bf21aaSSteven Rostedt #ifdef CONFIG_FTRACE_MCOUNT_RECORD
99568bf21aaSSteven Rostedt extern void ftrace_init(void);
996a1326b17SMark Rutland #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
997a1326b17SMark Rutland #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
998a1326b17SMark Rutland #else
999a1326b17SMark Rutland #define FTRACE_CALLSITE_SECTION "__mcount_loc"
1000a1326b17SMark Rutland #endif
100168bf21aaSSteven Rostedt #else
ftrace_init(void)100268bf21aaSSteven Rostedt static inline void ftrace_init(void) { }
100368bf21aaSSteven Rostedt #endif
100468bf21aaSSteven Rostedt
/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;	    /* call-graph depth at function entry */
} __packed;
1014dd0e545fSSteven Rostedt
/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	unsigned long retval; /* return value of the traced function */
#endif
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	/* presumably entry/exit timestamps of the call — see ftrace_ret_stack */
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;
1031caf4b323SFrederic Weisbecker
103262b915f1SJiri Olsa /* Type of the callback handlers for tracing function graph*/
103362b915f1SJiri Olsa typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
103462b915f1SJiri Olsa typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
103562b915f1SJiri Olsa
1036e8025babSSteven Rostedt (VMware) extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
1037e8025babSSteven Rostedt (VMware)
1038fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10398b96f011SFrederic Weisbecker
/* Callback pair registered with register_ftrace_graph(). */
struct fgraph_ops {
	trace_func_graph_ent_t entryfunc;	/* called on function entry */
	trace_func_graph_ret_t retfunc;		/* called on function return */
};
1044688f7089SSteven Rostedt (VMware)
/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;		/* saved original return address */
	unsigned long func;		/* traced function address */
	unsigned long long calltime;	/* timestamp taken at function entry */
#ifdef CONFIG_FUNCTION_PROFILER
	/* NOTE(review): presumably time accumulated in sub-calls — confirm */
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;		/* frame pointer, for consistency test */
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;		/* location the return address was read from */
#endif
};
1064712406a6SSteven Rostedt
1065712406a6SSteven Rostedt /*
1066712406a6SSteven Rostedt * Primary handler of a function return.
1067712406a6SSteven Rostedt * It relays on ftrace_return_to_handler.
1068712406a6SSteven Rostedt * Defined in entry_32/64.S
1069712406a6SSteven Rostedt */
1070712406a6SSteven Rostedt extern void return_to_handler(void);
1071712406a6SSteven Rostedt
1072712406a6SSteven Rostedt extern int
10738114865fSSteven Rostedt (VMware) function_graph_enter(unsigned long ret, unsigned long func,
10749a7c348bSJosh Poimboeuf unsigned long frame_pointer, unsigned long *retp);
1075712406a6SSteven Rostedt
1076b0e21a61SSteven Rostedt (VMware) struct ftrace_ret_stack *
1077b0e21a61SSteven Rostedt (VMware) ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
1078b0e21a61SSteven Rostedt (VMware)
1079223918e3SJosh Poimboeuf unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
1080223918e3SJosh Poimboeuf unsigned long ret, unsigned long *retp);
1081223918e3SJosh Poimboeuf
1082712406a6SSteven Rostedt /*
10838b96f011SFrederic Weisbecker * Sometimes we don't want to trace a function with the function
10848b96f011SFrederic Weisbecker * graph tracer but we want them to keep traced by the usual function
10858b96f011SFrederic Weisbecker * tracer if the function graph tracer is not configured.
10868b96f011SFrederic Weisbecker */
10878b96f011SFrederic Weisbecker #define __notrace_funcgraph notrace
10888b96f011SFrederic Weisbecker
1089f201ae23SFrederic Weisbecker #define FTRACE_RETFUNC_DEPTH 50
1090f201ae23SFrederic Weisbecker #define FTRACE_RETSTACK_ALLOC_SIZE 32
1091688f7089SSteven Rostedt (VMware)
1092688f7089SSteven Rostedt (VMware) extern int register_ftrace_graph(struct fgraph_ops *ops);
1093688f7089SSteven Rostedt (VMware) extern void unregister_ftrace_graph(struct fgraph_ops *ops);
1094f201ae23SFrederic Weisbecker
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 *
 * Return: true once the kill_ftrace_graph static key has been enabled.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	/* static branch: near-zero cost on the hot path until the key flips */
	return static_branch_unlikely(&kill_ftrace_graph);
}
110818bfee32SChristophe Leroy
110914a866c5SSteven Rostedt extern void ftrace_graph_stop(void);
111014a866c5SSteven Rostedt
1111287b6e68SFrederic Weisbecker /* The current handlers in use */
1112287b6e68SFrederic Weisbecker extern trace_func_graph_ret_t ftrace_graph_return;
1113287b6e68SFrederic Weisbecker extern trace_func_graph_ent_t ftrace_graph_entry;
1114287b6e68SFrederic Weisbecker
1115fb52607aSFrederic Weisbecker extern void ftrace_graph_init_task(struct task_struct *t);
1116fb52607aSFrederic Weisbecker extern void ftrace_graph_exit_task(struct task_struct *t);
1117868baf07SSteven Rostedt extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
111821a8c466SFrederic Weisbecker
pause_graph_tracing(void)1119380c4b14SFrederic Weisbecker static inline void pause_graph_tracing(void)
1120380c4b14SFrederic Weisbecker {
1121380c4b14SFrederic Weisbecker atomic_inc(¤t->tracing_graph_pause);
1122380c4b14SFrederic Weisbecker }
1123380c4b14SFrederic Weisbecker
unpause_graph_tracing(void)1124380c4b14SFrederic Weisbecker static inline void unpause_graph_tracing(void)
1125380c4b14SFrederic Weisbecker {
1126380c4b14SFrederic Weisbecker atomic_dec(¤t->tracing_graph_pause);
1127380c4b14SFrederic Weisbecker }
11285ac9f622SSteven Rostedt #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
11298b96f011SFrederic Weisbecker
11308b96f011SFrederic Weisbecker #define __notrace_funcgraph
11318b96f011SFrederic Weisbecker
/* No-op stubs: nothing to set up per task without the graph tracer. */
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
113521a8c466SFrederic Weisbecker
1136688f7089SSteven Rostedt (VMware) /* Define as macros as fgraph_ops may not be defined */
1137688f7089SSteven Rostedt (VMware) #define register_ftrace_graph(ops) ({ -1; })
1138688f7089SSteven Rostedt (VMware) #define unregister_ftrace_graph(ops) do { } while (0)
1139380c4b14SFrederic Weisbecker
/*
 * Without the graph tracer no return addresses are modified, so the
 * caller's candidate address is returned unchanged.
 */
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}
1146223918e3SJosh Poimboeuf
/* Pausing/unpausing is a no-op when the graph tracer is not built in. */
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
11495ac9f622SSteven Rostedt #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1150caf4b323SFrederic Weisbecker
1151ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1152cecbca96SFrederic Weisbecker enum ftrace_dump_mode;
1153cecbca96SFrederic Weisbecker
1154cecbca96SFrederic Weisbecker extern enum ftrace_dump_mode ftrace_dump_on_oops;
11550daa2302SSteven Rostedt (Red Hat) extern int tracepoint_printk;
1156526211bcSIngo Molnar
1157de7edd31SSteven Rostedt (Red Hat) extern void disable_trace_on_warning(void);
1158de7edd31SSteven Rostedt (Red Hat) extern int __disable_trace_on_warning;
1159de7edd31SSteven Rostedt (Red Hat)
116042391745SSteven Rostedt (Red Hat) int tracepoint_printk_sysctl(struct ctl_table *table, int write,
116132927393SChristoph Hellwig void *buffer, size_t *lenp, loff_t *ppos);
116242391745SSteven Rostedt (Red Hat)
1163de7edd31SSteven Rostedt (Red Hat) #else /* CONFIG_TRACING */
disable_trace_on_warning(void)1164de7edd31SSteven Rostedt (Red Hat) static inline void disable_trace_on_warning(void) { }
1165ea4e2bc4SSteven Rostedt #endif /* CONFIG_TRACING */
1166ea4e2bc4SSteven Rostedt
1167e7b8e675SMike Frysinger #ifdef CONFIG_FTRACE_SYSCALLS
1168e7b8e675SMike Frysinger
1169e7b8e675SMike Frysinger unsigned long arch_syscall_addr(int nr);
1170e7b8e675SMike Frysinger
1171e7b8e675SMike Frysinger #endif /* CONFIG_FTRACE_SYSCALLS */
1172e7b8e675SMike Frysinger
117316444a8aSArnaldo Carvalho de Melo #endif /* _LINUX_FTRACE_H */
1174