1bcea3f96SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
216444a8aSArnaldo Carvalho de Melo /*
316444a8aSArnaldo Carvalho de Melo * Infrastructure for profiling code inserted by 'gcc -pg'.
416444a8aSArnaldo Carvalho de Melo *
516444a8aSArnaldo Carvalho de Melo * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
616444a8aSArnaldo Carvalho de Melo * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
716444a8aSArnaldo Carvalho de Melo *
816444a8aSArnaldo Carvalho de Melo * Originally ported from the -rt patch by:
916444a8aSArnaldo Carvalho de Melo * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
1016444a8aSArnaldo Carvalho de Melo *
1116444a8aSArnaldo Carvalho de Melo * Based on code in the latency_tracer, that is:
1216444a8aSArnaldo Carvalho de Melo *
1316444a8aSArnaldo Carvalho de Melo * Copyright (C) 2004-2006 Ingo Molnar
146d49e352SNadia Yvette Chambers * Copyright (C) 2004 Nadia Yvette Chambers
1516444a8aSArnaldo Carvalho de Melo */
1616444a8aSArnaldo Carvalho de Melo
173d083395SSteven Rostedt #include <linux/stop_machine.h>
183d083395SSteven Rostedt #include <linux/clocksource.h>
1929930025SIngo Molnar #include <linux/sched/task.h>
203d083395SSteven Rostedt #include <linux/kallsyms.h>
2117911ff3SSteven Rostedt (VMware) #include <linux/security.h>
225072c59fSSteven Rostedt #include <linux/seq_file.h>
238434dc93SSteven Rostedt (Red Hat) #include <linux/tracefs.h>
243d083395SSteven Rostedt #include <linux/hardirq.h>
252d8b820bSIngo Molnar #include <linux/kthread.h>
265072c59fSSteven Rostedt #include <linux/uaccess.h>
275855feadSSteven Rostedt #include <linux/bsearch.h>
2856d82e00SPaul Gortmaker #include <linux/module.h>
292d8b820bSIngo Molnar #include <linux/ftrace.h>
30b0fc494fSSteven Rostedt #include <linux/sysctl.h>
315a0e3ad6STejun Heo #include <linux/slab.h>
325072c59fSSteven Rostedt #include <linux/ctype.h>
3368950619SSteven Rostedt #include <linux/sort.h>
343d083395SSteven Rostedt #include <linux/list.h>
3559df055fSSteven Rostedt #include <linux/hash.h>
363f379b03SPaul E. McKenney #include <linux/rcupdate.h>
37fabe38abSMasami Hiramatsu #include <linux/kprobes.h>
3816444a8aSArnaldo Carvalho de Melo
39ad8d75ffSSteven Rostedt #include <trace/events/sched.h>
408aef2d28SSteven Rostedt
41b80f0f6cSSteven Rostedt (VMware) #include <asm/sections.h>
422af15d6aSSteven Rostedt #include <asm/setup.h>
43395a59d0SAbhishek Sagar
443306fc4aSSteven Rostedt (VMware) #include "ftrace_internal.h"
450706f1c4SSteven Rostedt #include "trace_output.h"
46bac429f0SSteven Rostedt #include "trace_stat.h"
473d083395SSteven Rostedt
/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
				 FTRACE_FL_MODIFIED)

/* Name reported for an address that cannot be resolved to a function */
#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

/*
 * Warn (once for the _ONCE variant) and shut down function tracing via
 * ftrace_kill() when @cond is true.  The statement expression evaluates
 * to the condition's value so callers can use it inside an if ().
 */
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
738fc0c701SSteven Rostedt
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Static initializer for an ftrace_ops' hash state: point func_hash at
 * the ops' own local_hash and statically initialize the mutex guarding it.
 */
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

/* Flags passed to the code-modification paths (consumers not visible in this chunk) */
enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

/* Sentinel that terminates the ftrace_ops list; its callback is the stub */
struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};
922f5f6ad9SSteven Rostedt
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* Previous ftrace_enabled value (presumably for a sysctl handler — not used in this chunk) */
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;
10160a7ecf4SSteven Rostedt
ftrace_pids_enabled(struct ftrace_ops * ops)102345ddcc8SSteven Rostedt (Red Hat) static bool ftrace_pids_enabled(struct ftrace_ops *ops)
103e3eea140SSteven Rostedt (Red Hat) {
104345ddcc8SSteven Rostedt (Red Hat) struct trace_array *tr;
105345ddcc8SSteven Rostedt (Red Hat)
106345ddcc8SSteven Rostedt (Red Hat) if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
107345ddcc8SSteven Rostedt (Red Hat) return false;
108345ddcc8SSteven Rostedt (Red Hat)
109345ddcc8SSteven Rostedt (Red Hat) tr = ops->private;
110345ddcc8SSteven Rostedt (Red Hat)
111b3b1e6edSSteven Rostedt (VMware) return tr->function_pids != NULL || tr->function_no_pids != NULL;
112e3eea140SSteven Rostedt (Red Hat) }
113e3eea140SSteven Rostedt (Red Hat)
static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Serializes updates to the ops list and related state (see lockdep checks below) */
DEFINE_MUTEX(ftrace_lock);

/* RCU-protected list of all registered ftrace_ops, terminated by ftrace_list_end */
struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
/* Callback invoked by the static (non-dynamic) tracing entry path */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
131b848914cSSteven Rostedt
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func		= ftrace_ops_list_func,
	.flags		= FTRACE_OPS_FL_STUB,
};

/* Callback that intentionally does nothing; installed via ftrace_nop_ops below */
static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* do nothing */
}

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func		= ftrace_ops_nop_func,
	.flags		= FTRACE_OPS_FL_STUB,
};
#endif
158cbad0fb2SMark Rutland
/*
 * One-time lazy initialization of an ftrace_ops: set up the regex-hash
 * mutex and point func_hash at the ops' own local_hash.  A no-op once
 * FTRACE_OPS_FL_INITIALIZED is set, and in !CONFIG_DYNAMIC_FTRACE builds.
 */
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	mutex_init(&ops->local_hash.regex_lock);
	ops->func_hash = &ops->local_hash;
	ops->flags |= FTRACE_OPS_FL_INITIALIZED;
#endif
}
169f04f24fbSMasami Hiramatsu
/*
 * Wrapper installed as ops->func while PID filtering is active.
 * Reads the per-CPU ftrace_ignore_pid state of the ops' trace array and
 * forwards to the saved callback only when the current task should be
 * traced.
 */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		/* This CPU's current task is explicitly filtered out */
		if (pid == FTRACE_PID_IGNORE)
			return;
		/* FTRACE_PID_TRACE means trace everything; otherwise match current */
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}
187df4fc315SSteven Rostedt
/*
 * IPI callback used to force a read memory barrier on every CPU
 * (see the smp_call_function() call in update_ftrace_function()).
 */
static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}
193405e1d83SSteven Rostedt (Red Hat)
/*
 * Select the callback the trampoline should invoke for @ops: dynamic or
 * RCU ops (or a build that forces the list function) must go through
 * ftrace_ops_list_func; otherwise the ops' own function can be called.
 */
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
20600ccbf2fSSteven Rostedt (Red Hat)
/*
 * Recompute which callback function tracing should use, based on how
 * many ftrace_ops are registered (none -> stub, exactly one -> that
 * ops' function where possible, several -> the list function), and
 * publish it.  Expected to run under ftrace_lock (see the
 * lockdep_is_held() annotations).  The !CONFIG_DYNAMIC_FTRACE path
 * performs a careful multi-step handover because the change takes
 * effect immediately on all CPUs.
 */
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
285491d0dcfSSteven Rostedt
/*
 * Insert @ops at the head of an RCU-protected ftrace_ops list.  The
 * publication order — ops->next first, then the list head — keeps the
 * list walkable by concurrent readers at every intermediate point.
 */
static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
2993d083395SSteven Rostedt
/*
 * Unlink @ops from an ftrace_ops list.  Returns 0 on success, -1 when
 * @ops is not on the list.  The lockdep_is_held() annotations document
 * that list updates happen under ftrace_lock.
 */
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	/* Walk the next-pointers looking for the slot that points at ops */
	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	/* Splice ops out of the list */
	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
329f3bea491SSteven Rostedt (Red Hat)
/*
 * Add @ops to the global ftrace_ops list, validate its flags, install
 * the PID-filter wrapper if needed, and refresh the active trace
 * function when tracing is enabled.  Returns 0 on success or a
 * negative errno (-EINVAL for bad flags, -EBUSY if already enabled or
 * a PERMANENT ops is registered while ftrace is off).
 */
int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	/* Ops not in core kernel data (e.g. in a module) must be treated as dynamic */
	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
37216444a8aSArnaldo Carvalho de Melo
/*
 * Remove @ops from the global list, refresh the active trace function
 * when tracing is enabled, and restore ops->func from saved_func
 * (undoing any PID wrapper installed at registration).  Returns 0 on
 * success, -EBUSY if @ops was never enabled, or the removal error.
 */
int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}
3923d083395SSteven Rostedt
/*
 * Re-evaluate PID filtering for every registered ops that opted in via
 * FTRACE_OPS_FL_PID: install the ftrace_pid_func wrapper when a PID
 * filter is active, otherwise restore the saved callback; then refresh
 * each affected trampoline and the global trace function.
 */
static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
411df4fc315SSteven Rostedt
#ifdef CONFIG_FUNCTION_PROFILER
/* One profiled function: its address, hit count and (optional) timing sums */
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;		/* summed time (avg = time / counter below) */
	unsigned long long		time_squared;	/* summed squared time, for the variance calc */
#endif
};

/* A page worth of profile records, chained into a singly linked list */
struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;	/* records in use on this page */
	struct ftrace_profile		records[];
};

/* Per-CPU profiler state (see ftrace_profile_stats below) */
struct ftrace_profile_stat {
	atomic_t		disabled;
	struct hlist_head	*hash;	/* lookup table over the records */
	struct ftrace_profile_page *pages;	/* page cursor (reset to start) */
	struct ftrace_profile_page *start;	/* first page of the list */
	struct tracer_stat	stat;
};

/* Usable record bytes per page, and how many records that fits */
#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
452493762fcSSteven Rostedt
/*
 * Stat-iterator step: advance from record @v (advance only when
 * idx != 0), crossing to the next profile page when the current one is
 * exhausted.  When entering a fresh page, a first record that was never
 * hit (counter == 0) triggers another advance.  Returns NULL at the
 * end.  Recovering the containing page by masking the record address
 * relies on pages being PAGE_SIZE-aligned.
 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}
476493762fcSSteven Rostedt
/*
 * Stat-iterator start: hand back the first usable record of this
 * per-CPU profile, or NULL if no pages have been allocated yet.
 */
static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
487493762fcSSteven Rostedt
4880706f1c4SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4890706f1c4SSteven Rostedt /* function graph compares on total time */
function_stat_cmp(const void * p1,const void * p2)49080042c8fSAndy Shevchenko static int function_stat_cmp(const void *p1, const void *p2)
4910706f1c4SSteven Rostedt {
49280042c8fSAndy Shevchenko const struct ftrace_profile *a = p1;
49380042c8fSAndy Shevchenko const struct ftrace_profile *b = p2;
4940706f1c4SSteven Rostedt
4950706f1c4SSteven Rostedt if (a->time < b->time)
4960706f1c4SSteven Rostedt return -1;
4970706f1c4SSteven Rostedt if (a->time > b->time)
4980706f1c4SSteven Rostedt return 1;
4990706f1c4SSteven Rostedt else
5000706f1c4SSteven Rostedt return 0;
5010706f1c4SSteven Rostedt }
5020706f1c4SSteven Rostedt #else
5030706f1c4SSteven Rostedt /* not function graph compares against hits */
function_stat_cmp(const void * p1,const void * p2)50480042c8fSAndy Shevchenko static int function_stat_cmp(const void *p1, const void *p2)
505493762fcSSteven Rostedt {
50680042c8fSAndy Shevchenko const struct ftrace_profile *a = p1;
50780042c8fSAndy Shevchenko const struct ftrace_profile *b = p2;
508493762fcSSteven Rostedt
509493762fcSSteven Rostedt if (a->counter < b->counter)
510493762fcSSteven Rostedt return -1;
511493762fcSSteven Rostedt if (a->counter > b->counter)
512493762fcSSteven Rostedt return 1;
513493762fcSSteven Rostedt else
514493762fcSSteven Rostedt return 0;
515493762fcSSteven Rostedt }
5160706f1c4SSteven Rostedt #endif
517493762fcSSteven Rostedt
/*
 * Emit the column headers for the profile stat file.
 * NOTE(review): the literal column spacing below may have been
 * collapsed by formatting of this copy — verify alignment against the
 * values printed by function_stat_show().
 */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function "
		"Hit Time Avg s^2\n"
		"  -------- "
		"--- ---- --- ---\n");
#else
	seq_puts(m, "  Function Hit\n"
		"  -------- ---\n");
#endif
	return 0;
}
531493762fcSSteven Rostedt
/*
 * Print one profile record: symbol name, hit count, and (with the graph
 * tracer) total time, average and variance.  Holds ftrace_profile_lock
 * for the duration, which also serializes use of the static trace_seq
 * buffer.  Returns 0, or -EBUSY when racing with a profile reset.
 */
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* static: shared scratch buffer, protected by ftrace_profile_lock */
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
	unsigned long long stddev_denom;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	/* Skip entries below the tracing threshold, if one is set */
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, " ");

	/*
	 * Variance formula:
	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
	 * Maybe Welford's method is better here?
	 * Divide only by 1000 for ns^2 -> us^2 conversion.
	 * trace_print_graph_duration will divide by 1000 again.
	 */
	stddev = 0;
	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
	/* Guard against n < 2 (and the *1000 product being zero) */
	if (stddev_denom) {
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;
		stddev = div64_ul(stddev, stddev_denom);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
592493762fcSSteven Rostedt
ftrace_profile_reset(struct ftrace_profile_stat * stat)593cafb168aSSteven Rostedt static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
594493762fcSSteven Rostedt {
595493762fcSSteven Rostedt struct ftrace_profile_page *pg;
596493762fcSSteven Rostedt
597cafb168aSSteven Rostedt pg = stat->pages = stat->start;
598493762fcSSteven Rostedt
599493762fcSSteven Rostedt while (pg) {
600493762fcSSteven Rostedt memset(pg->records, 0, PROFILE_RECORDS_SIZE);
601493762fcSSteven Rostedt pg->index = 0;
602493762fcSSteven Rostedt pg = pg->next;
603493762fcSSteven Rostedt }
604493762fcSSteven Rostedt
605cafb168aSSteven Rostedt memset(stat->hash, 0,
606493762fcSSteven Rostedt FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
607493762fcSSteven Rostedt }
608493762fcSSteven Rostedt
ftrace_profile_pages_init(struct ftrace_profile_stat * stat)609172f7ba9Schongjiapeng static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
610493762fcSSteven Rostedt {
611493762fcSSteven Rostedt struct ftrace_profile_page *pg;
612318e0a73SSteven Rostedt int functions;
613318e0a73SSteven Rostedt int pages;
614493762fcSSteven Rostedt int i;
615493762fcSSteven Rostedt
616493762fcSSteven Rostedt /* If we already allocated, do nothing */
617cafb168aSSteven Rostedt if (stat->pages)
618493762fcSSteven Rostedt return 0;
619493762fcSSteven Rostedt
620cafb168aSSteven Rostedt stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
621cafb168aSSteven Rostedt if (!stat->pages)
622493762fcSSteven Rostedt return -ENOMEM;
623493762fcSSteven Rostedt
624318e0a73SSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
625318e0a73SSteven Rostedt functions = ftrace_update_tot_cnt;
626318e0a73SSteven Rostedt #else
627318e0a73SSteven Rostedt /*
628318e0a73SSteven Rostedt * We do not know the number of functions that exist because
629318e0a73SSteven Rostedt * dynamic tracing is what counts them. With past experience
630318e0a73SSteven Rostedt * we have around 20K functions. That should be more than enough.
631318e0a73SSteven Rostedt * It is highly unlikely we will execute every function in
632318e0a73SSteven Rostedt * the kernel.
633318e0a73SSteven Rostedt */
634318e0a73SSteven Rostedt functions = 20000;
635318e0a73SSteven Rostedt #endif
636318e0a73SSteven Rostedt
637cafb168aSSteven Rostedt pg = stat->start = stat->pages;
638493762fcSSteven Rostedt
639318e0a73SSteven Rostedt pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
640318e0a73SSteven Rostedt
64139e30cd1SNamhyung Kim for (i = 1; i < pages; i++) {
642493762fcSSteven Rostedt pg->next = (void *)get_zeroed_page(GFP_KERNEL);
643493762fcSSteven Rostedt if (!pg->next)
644318e0a73SSteven Rostedt goto out_free;
645493762fcSSteven Rostedt pg = pg->next;
646493762fcSSteven Rostedt }
647493762fcSSteven Rostedt
648493762fcSSteven Rostedt return 0;
649318e0a73SSteven Rostedt
650318e0a73SSteven Rostedt out_free:
651318e0a73SSteven Rostedt pg = stat->start;
652318e0a73SSteven Rostedt while (pg) {
653318e0a73SSteven Rostedt unsigned long tmp = (unsigned long)pg;
654318e0a73SSteven Rostedt
655318e0a73SSteven Rostedt pg = pg->next;
656318e0a73SSteven Rostedt free_page(tmp);
657318e0a73SSteven Rostedt }
658318e0a73SSteven Rostedt
659318e0a73SSteven Rostedt stat->pages = NULL;
660318e0a73SSteven Rostedt stat->start = NULL;
661318e0a73SSteven Rostedt
662318e0a73SSteven Rostedt return -ENOMEM;
663493762fcSSteven Rostedt }
664493762fcSSteven Rostedt
ftrace_profile_init_cpu(int cpu)665cafb168aSSteven Rostedt static int ftrace_profile_init_cpu(int cpu)
666493762fcSSteven Rostedt {
667cafb168aSSteven Rostedt struct ftrace_profile_stat *stat;
668493762fcSSteven Rostedt int size;
669493762fcSSteven Rostedt
670cafb168aSSteven Rostedt stat = &per_cpu(ftrace_profile_stats, cpu);
671cafb168aSSteven Rostedt
672cafb168aSSteven Rostedt if (stat->hash) {
673493762fcSSteven Rostedt /* If the profile is already created, simply reset it */
674cafb168aSSteven Rostedt ftrace_profile_reset(stat);
675493762fcSSteven Rostedt return 0;
676493762fcSSteven Rostedt }
677493762fcSSteven Rostedt
678493762fcSSteven Rostedt /*
679493762fcSSteven Rostedt * We are profiling all functions, but usually only a few thousand
680493762fcSSteven Rostedt * functions are hit. We'll make a hash of 1024 items.
681493762fcSSteven Rostedt */
682493762fcSSteven Rostedt size = FTRACE_PROFILE_HASH_SIZE;
683493762fcSSteven Rostedt
6846396bb22SKees Cook stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
685493762fcSSteven Rostedt
686cafb168aSSteven Rostedt if (!stat->hash)
687493762fcSSteven Rostedt return -ENOMEM;
688493762fcSSteven Rostedt
689318e0a73SSteven Rostedt /* Preallocate the function profiling pages */
690cafb168aSSteven Rostedt if (ftrace_profile_pages_init(stat) < 0) {
691cafb168aSSteven Rostedt kfree(stat->hash);
692cafb168aSSteven Rostedt stat->hash = NULL;
693493762fcSSteven Rostedt return -ENOMEM;
694493762fcSSteven Rostedt }
695493762fcSSteven Rostedt
696493762fcSSteven Rostedt return 0;
697493762fcSSteven Rostedt }
698493762fcSSteven Rostedt
ftrace_profile_init(void)699cafb168aSSteven Rostedt static int ftrace_profile_init(void)
700cafb168aSSteven Rostedt {
701cafb168aSSteven Rostedt int cpu;
702cafb168aSSteven Rostedt int ret = 0;
703cafb168aSSteven Rostedt
704c4602c1cSMiao Xie for_each_possible_cpu(cpu) {
705cafb168aSSteven Rostedt ret = ftrace_profile_init_cpu(cpu);
706cafb168aSSteven Rostedt if (ret)
707cafb168aSSteven Rostedt break;
708cafb168aSSteven Rostedt }
709cafb168aSSteven Rostedt
710cafb168aSSteven Rostedt return ret;
711cafb168aSSteven Rostedt }
712cafb168aSSteven Rostedt
713493762fcSSteven Rostedt /* interrupts must be disabled */
714cafb168aSSteven Rostedt static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat * stat,unsigned long ip)715cafb168aSSteven Rostedt ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
716493762fcSSteven Rostedt {
717493762fcSSteven Rostedt struct ftrace_profile *rec;
718493762fcSSteven Rostedt struct hlist_head *hhd;
719493762fcSSteven Rostedt unsigned long key;
720493762fcSSteven Rostedt
72120079ebeSNamhyung Kim key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
722cafb168aSSteven Rostedt hhd = &stat->hash[key];
723493762fcSSteven Rostedt
724493762fcSSteven Rostedt if (hlist_empty(hhd))
725493762fcSSteven Rostedt return NULL;
726493762fcSSteven Rostedt
7271bb539caSSteven Rostedt hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
728493762fcSSteven Rostedt if (rec->ip == ip)
729493762fcSSteven Rostedt return rec;
730493762fcSSteven Rostedt }
731493762fcSSteven Rostedt
732493762fcSSteven Rostedt return NULL;
733493762fcSSteven Rostedt }
734493762fcSSteven Rostedt
ftrace_add_profile(struct ftrace_profile_stat * stat,struct ftrace_profile * rec)735cafb168aSSteven Rostedt static void ftrace_add_profile(struct ftrace_profile_stat *stat,
736cafb168aSSteven Rostedt struct ftrace_profile *rec)
737493762fcSSteven Rostedt {
738493762fcSSteven Rostedt unsigned long key;
739493762fcSSteven Rostedt
74020079ebeSNamhyung Kim key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
741cafb168aSSteven Rostedt hlist_add_head_rcu(&rec->node, &stat->hash[key]);
742493762fcSSteven Rostedt }
743493762fcSSteven Rostedt
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the record for @ip (new or concurrently added), or NULL if
 * recursion was detected or all preallocated pages are full.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		/* Current page full: move on, or give up if none left */
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	/* Claim the next free slot and publish it in the hash */
	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
779493762fcSSteven Rostedt
/*
 * Profiler callback run on each traced function entry: look up (or
 * allocate) the profile record for @ip and bump its hit counter.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	/* irqs off: the per-CPU stat must not be touched from another CPU */
	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	/* Re-check enabled under irq-off in case profiling just stopped */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
808493762fcSSteven Rostedt
8090706f1c4SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* When true, time spent in sub-functions is kept in a function's time */
static bool fgraph_graph_time = true;

/* Toggle whether sub-function time is included (see profile_graph_return) */
void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}
816e73e679fSSteven Rostedt (VMware)
/*
 * Function-graph entry handler for the profiler: count the call and
 * clear this frame's subtime accumulator.  Returns 1 to trace the
 * return as well, 0 to skip it.
 */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	/* parent_ip/ops/fregs are unused by the profiler callback */
	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}
8330706f1c4SSteven Rostedt
/*
 * Function-graph return handler for the profiler: compute how long the
 * function ran and add it (and its square, for stddev) to the record.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	/* Without graph time, subtract the time spent in nested calls */
	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		/* time_squared feeds the stddev shown in function_stat_show */
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
8760706f1c4SSteven Rostedt
/* fgraph ops hooking the profiler into function-graph entry/return */
static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};
881688f7089SSteven Rostedt (VMware)
/* Start profiling via the function-graph tracer (counts + timings) */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}
8860706f1c4SSteven Rostedt
/* Stop the graph-based profiler */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
8910706f1c4SSteven Rostedt #else
/* Without function-graph: a plain ftrace_ops that only counts calls */
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func = function_profile_call,
	.flags = FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};
897493762fcSSteven Rostedt
/* Start profiling via a plain function callback (no timings) */
static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}
9020706f1c4SSteven Rostedt
/* Stop the plain-callback profiler */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
9070706f1c4SSteven Rostedt #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
9080706f1c4SSteven Rostedt
909493762fcSSteven Rostedt static ssize_t
ftrace_profile_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)910493762fcSSteven Rostedt ftrace_profile_write(struct file *filp, const char __user *ubuf,
911493762fcSSteven Rostedt size_t cnt, loff_t *ppos)
912493762fcSSteven Rostedt {
913493762fcSSteven Rostedt unsigned long val;
914493762fcSSteven Rostedt int ret;
915493762fcSSteven Rostedt
91622fe9b54SPeter Huewe ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
91722fe9b54SPeter Huewe if (ret)
918493762fcSSteven Rostedt return ret;
919493762fcSSteven Rostedt
920493762fcSSteven Rostedt val = !!val;
921493762fcSSteven Rostedt
922493762fcSSteven Rostedt mutex_lock(&ftrace_profile_lock);
923493762fcSSteven Rostedt if (ftrace_profile_enabled ^ val) {
924493762fcSSteven Rostedt if (val) {
925493762fcSSteven Rostedt ret = ftrace_profile_init();
926493762fcSSteven Rostedt if (ret < 0) {
927493762fcSSteven Rostedt cnt = ret;
928493762fcSSteven Rostedt goto out;
929493762fcSSteven Rostedt }
930493762fcSSteven Rostedt
9310706f1c4SSteven Rostedt ret = register_ftrace_profiler();
9320706f1c4SSteven Rostedt if (ret < 0) {
9330706f1c4SSteven Rostedt cnt = ret;
9340706f1c4SSteven Rostedt goto out;
9350706f1c4SSteven Rostedt }
936493762fcSSteven Rostedt ftrace_profile_enabled = 1;
937493762fcSSteven Rostedt } else {
938493762fcSSteven Rostedt ftrace_profile_enabled = 0;
9390f6ce3deSSteven Rostedt /*
9400f6ce3deSSteven Rostedt * unregister_ftrace_profiler calls stop_machine
94174401729SPaul E. McKenney * so this acts like an synchronize_rcu.
9420f6ce3deSSteven Rostedt */
9430706f1c4SSteven Rostedt unregister_ftrace_profiler();
944493762fcSSteven Rostedt }
945493762fcSSteven Rostedt }
946493762fcSSteven Rostedt out:
947493762fcSSteven Rostedt mutex_unlock(&ftrace_profile_lock);
948493762fcSSteven Rostedt
949cf8517cfSJiri Olsa *ppos += cnt;
950493762fcSSteven Rostedt
951493762fcSSteven Rostedt return cnt;
952493762fcSSteven Rostedt }
953493762fcSSteven Rostedt
954493762fcSSteven Rostedt static ssize_t
ftrace_profile_read(struct file * filp,char __user * ubuf,size_t cnt,loff_t * ppos)955493762fcSSteven Rostedt ftrace_profile_read(struct file *filp, char __user *ubuf,
956493762fcSSteven Rostedt size_t cnt, loff_t *ppos)
957493762fcSSteven Rostedt {
958fb9fb015SSteven Rostedt char buf[64]; /* big enough to hold a number */
959493762fcSSteven Rostedt int r;
960493762fcSSteven Rostedt
961493762fcSSteven Rostedt r = sprintf(buf, "%u\n", ftrace_profile_enabled);
962493762fcSSteven Rostedt return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
963493762fcSSteven Rostedt }
964493762fcSSteven Rostedt
/* File operations for the "function_profile_enabled" control file */
static const struct file_operations ftrace_profile_fops = {
	.open = tracing_open_generic,
	.read = ftrace_profile_read,
	.write = ftrace_profile_write,
	.llseek = default_llseek,
};
971493762fcSSteven Rostedt
/* used to initialize the real stat files */
/* Template copied per CPU; .name is replaced with "functionN" below */
static struct tracer_stat function_stats __initdata = {
	.name = "functions",
	.stat_start = function_stat_start,
	.stat_next = function_stat_next,
	.stat_cmp = function_stat_cmp,
	.stat_headers = function_stat_headers,
	.stat_show = function_stat_show
};
981cafb168aSSteven Rostedt
ftrace_profile_tracefs(struct dentry * d_tracer)9828434dc93SSteven Rostedt (Red Hat) static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
983493762fcSSteven Rostedt {
984cafb168aSSteven Rostedt struct ftrace_profile_stat *stat;
985cafb168aSSteven Rostedt char *name;
986493762fcSSteven Rostedt int ret;
987cafb168aSSteven Rostedt int cpu;
988493762fcSSteven Rostedt
989cafb168aSSteven Rostedt for_each_possible_cpu(cpu) {
990cafb168aSSteven Rostedt stat = &per_cpu(ftrace_profile_stats, cpu);
991cafb168aSSteven Rostedt
9926363c6b5SGeliang Tang name = kasprintf(GFP_KERNEL, "function%d", cpu);
993cafb168aSSteven Rostedt if (!name) {
994cafb168aSSteven Rostedt /*
995cafb168aSSteven Rostedt * The files created are permanent, if something happens
996cafb168aSSteven Rostedt * we still do not free memory.
997cafb168aSSteven Rostedt */
998cafb168aSSteven Rostedt WARN(1,
999cafb168aSSteven Rostedt "Could not allocate stat file for cpu %d\n",
1000cafb168aSSteven Rostedt cpu);
1001493762fcSSteven Rostedt return;
1002493762fcSSteven Rostedt }
1003cafb168aSSteven Rostedt stat->stat = function_stats;
1004cafb168aSSteven Rostedt stat->stat.name = name;
1005cafb168aSSteven Rostedt ret = register_stat_tracer(&stat->stat);
1006cafb168aSSteven Rostedt if (ret) {
1007cafb168aSSteven Rostedt WARN(1,
1008cafb168aSSteven Rostedt "Could not register function stat for cpu %d\n",
1009cafb168aSSteven Rostedt cpu);
1010cafb168aSSteven Rostedt kfree(name);
1011cafb168aSSteven Rostedt return;
1012cafb168aSSteven Rostedt }
1013cafb168aSSteven Rostedt }
1014493762fcSSteven Rostedt
1015e4931b82SYuntao Wang trace_create_file("function_profile_enabled",
101621ccc9cdSSteven Rostedt (VMware) TRACE_MODE_WRITE, d_tracer, NULL,
101721ccc9cdSSteven Rostedt (VMware) &ftrace_profile_fops);
1018493762fcSSteven Rostedt }
1019493762fcSSteven Rostedt
1020493762fcSSteven Rostedt #else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	/* Function profiler not configured: nothing to create */
}
1024493762fcSSteven Rostedt #endif /* CONFIG_FUNCTION_PROFILER */
1025493762fcSSteven Rostedt
10263d083395SSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
102773d3fd96SIngo Molnar
/* NOTE(review): appears to track the ops being removed during an update —
 * confirm against the update paths further down the file.
 */
static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;
1035e1effa01SSteven Rostedt (Red Hat)
103699ecdc43SSteven Rostedt #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1037cb7be3b2SSteven Rostedt # error Dynamic ftrace depends on MCOUNT_RECORD
103899ecdc43SSteven Rostedt #endif
103999ecdc43SSteven Rostedt
/* State tied to one registered function probe */
struct ftrace_func_probe {
	struct ftrace_probe_ops *probe_ops;
	struct ftrace_ops ops;
	struct trace_array *tr;
	struct list_head list;
	void *data;
	int ref;
};
10487b60f3d8SSteven Rostedt (VMware)
/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
/* Cast away const for API compatibility; the hash is never modified */
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
10605072c59fSSteven Rostedt
/* Globally shared ftrace_ops; starts with empty filter/notrace hashes */
struct ftrace_ops global_ops = {
	.func = ftrace_stub,
	.local_hash.notrace_hash = EMPTY_HASH,
	.local_hash.filter_hash = EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags = FTRACE_OPS_FL_INITIALIZED |
		 FTRACE_OPS_FL_PID,
};
1069f45948e8SSteven Rostedt
/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 * Returns the ftrace_ops whose dynamically allocated trampoline contains
 * @addr, or NULL if none does.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}
11006be7fa3cSSteven Rostedt (VMware)
11016be7fa3cSSteven Rostedt (VMware) /*
11026be7fa3cSSteven Rostedt (VMware) * This is used by __kernel_text_address() to return true if the
11036be7fa3cSSteven Rostedt (VMware) * address is on a dynamically allocated trampoline that would
11046be7fa3cSSteven Rostedt (VMware) * not return true for either core_kernel_text() or
11056be7fa3cSSteven Rostedt (VMware) * is_module_text_address().
11066be7fa3cSSteven Rostedt (VMware) */
is_ftrace_trampoline(unsigned long addr)11076be7fa3cSSteven Rostedt (VMware) bool is_ftrace_trampoline(unsigned long addr)
11086be7fa3cSSteven Rostedt (VMware) {
11096be7fa3cSSteven Rostedt (VMware) return ftrace_ops_trampoline(addr) != NULL;
1110aec0be2dSSteven Rostedt (Red Hat) }
1111aec0be2dSSteven Rostedt (Red Hat)
/* One page-sized chunk of dyn_ftrace records; chunks form a linked list */
struct ftrace_page {
	struct ftrace_page *next;
	struct dyn_ftrace *records;
	int index;
	int order;
};
11183c1720f0SSteven Rostedt
1119a7900875SSteven Rostedt #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1120a7900875SSteven Rostedt #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
11213c1720f0SSteven Rostedt
11223c1720f0SSteven Rostedt static struct ftrace_page *ftrace_pages_start;
11233c1720f0SSteven Rostedt static struct ftrace_page *ftrace_pages;
11243c1720f0SSteven Rostedt
11252b0cce0eSSteven Rostedt (VMware) static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash * hash,unsigned long ip)11262b0cce0eSSteven Rostedt (VMware) ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
11272b0cce0eSSteven Rostedt (VMware) {
11282b0cce0eSSteven Rostedt (VMware) if (hash->size_bits > 0)
11292b0cce0eSSteven Rostedt (VMware) return hash_long(ip, hash->size_bits);
11302b0cce0eSSteven Rostedt (VMware)
11312b0cce0eSSteven Rostedt (VMware) return 0;
11322b0cce0eSSteven Rostedt (VMware) }
11332b0cce0eSSteven Rostedt (VMware)
11342b2c279cSSteven Rostedt (VMware) /* Only use this function if ftrace_hash_empty() has already been tested */
11352b2c279cSSteven Rostedt (VMware) static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash * hash,unsigned long ip)11362b2c279cSSteven Rostedt (VMware) __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1137b448c4e3SSteven Rostedt {
1138b448c4e3SSteven Rostedt unsigned long key;
1139b448c4e3SSteven Rostedt struct ftrace_func_entry *entry;
1140b448c4e3SSteven Rostedt struct hlist_head *hhd;
1141b448c4e3SSteven Rostedt
11422b0cce0eSSteven Rostedt (VMware) key = ftrace_hash_key(hash, ip);
1143b448c4e3SSteven Rostedt hhd = &hash->buckets[key];
1144b448c4e3SSteven Rostedt
11451bb539caSSteven Rostedt hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1146b448c4e3SSteven Rostedt if (entry->ip == ip)
1147b448c4e3SSteven Rostedt return entry;
1148b448c4e3SSteven Rostedt }
1149b448c4e3SSteven Rostedt return NULL;
1150b448c4e3SSteven Rostedt }
1151b448c4e3SSteven Rostedt
11522b2c279cSSteven Rostedt (VMware) /**
11532b2c279cSSteven Rostedt (VMware) * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
11542b2c279cSSteven Rostedt (VMware) * @hash: The hash to look at
11552b2c279cSSteven Rostedt (VMware) * @ip: The instruction pointer to test
11562b2c279cSSteven Rostedt (VMware) *
11572b2c279cSSteven Rostedt (VMware) * Search a given @hash to see if a given instruction pointer (@ip)
11582b2c279cSSteven Rostedt (VMware) * exists in it.
11592b2c279cSSteven Rostedt (VMware) *
11602b2c279cSSteven Rostedt (VMware) * Returns the entry that holds the @ip if found. NULL otherwise.
11612b2c279cSSteven Rostedt (VMware) */
11622b2c279cSSteven Rostedt (VMware) struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash * hash,unsigned long ip)11632b2c279cSSteven Rostedt (VMware) ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
11642b2c279cSSteven Rostedt (VMware) {
11652b2c279cSSteven Rostedt (VMware) if (ftrace_hash_empty(hash))
11662b2c279cSSteven Rostedt (VMware) return NULL;
11672b2c279cSSteven Rostedt (VMware)
11682b2c279cSSteven Rostedt (VMware) return __ftrace_lookup_ip(hash, ip);
11692b2c279cSSteven Rostedt (VMware) }
11702b2c279cSSteven Rostedt (VMware)
__add_hash_entry(struct ftrace_hash * hash,struct ftrace_func_entry * entry)117133dc9b12SSteven Rostedt static void __add_hash_entry(struct ftrace_hash *hash,
117233dc9b12SSteven Rostedt struct ftrace_func_entry *entry)
117333dc9b12SSteven Rostedt {
117433dc9b12SSteven Rostedt struct hlist_head *hhd;
117533dc9b12SSteven Rostedt unsigned long key;
117633dc9b12SSteven Rostedt
11772b0cce0eSSteven Rostedt (VMware) key = ftrace_hash_key(hash, entry->ip);
117833dc9b12SSteven Rostedt hhd = &hash->buckets[key];
117933dc9b12SSteven Rostedt hlist_add_head(&entry->hlist, hhd);
118033dc9b12SSteven Rostedt hash->count++;
118133dc9b12SSteven Rostedt }
118233dc9b12SSteven Rostedt
1183a12754a8SSteven Rostedt (Google) static struct ftrace_func_entry *
add_hash_entry(struct ftrace_hash * hash,unsigned long ip)1184a12754a8SSteven Rostedt (Google) add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1185b448c4e3SSteven Rostedt {
1186b448c4e3SSteven Rostedt struct ftrace_func_entry *entry;
1187b448c4e3SSteven Rostedt
1188b448c4e3SSteven Rostedt entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1189b448c4e3SSteven Rostedt if (!entry)
1190a12754a8SSteven Rostedt (Google) return NULL;
1191b448c4e3SSteven Rostedt
1192b448c4e3SSteven Rostedt entry->ip = ip;
119333dc9b12SSteven Rostedt __add_hash_entry(hash, entry);
1194b448c4e3SSteven Rostedt
1195a12754a8SSteven Rostedt (Google) return entry;
1196b448c4e3SSteven Rostedt }
1197b448c4e3SSteven Rostedt
1198b448c4e3SSteven Rostedt static void
free_hash_entry(struct ftrace_hash * hash,struct ftrace_func_entry * entry)119933dc9b12SSteven Rostedt free_hash_entry(struct ftrace_hash *hash,
120033dc9b12SSteven Rostedt struct ftrace_func_entry *entry)
120133dc9b12SSteven Rostedt {
120233dc9b12SSteven Rostedt hlist_del(&entry->hlist);
120333dc9b12SSteven Rostedt kfree(entry);
120433dc9b12SSteven Rostedt hash->count--;
120533dc9b12SSteven Rostedt }
120633dc9b12SSteven Rostedt
/*
 * Unlink @entry from @hash without freeing it. The RCU variant of the
 * hlist delete is used, so concurrent readers traversing the chain are
 * still safe; the entry itself must only be freed after a grace period
 * (or transferred to another hash, as dup_hash() does).
 */
static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}
1214b448c4e3SSteven Rostedt
ftrace_hash_clear(struct ftrace_hash * hash)1215b448c4e3SSteven Rostedt static void ftrace_hash_clear(struct ftrace_hash *hash)
1216b448c4e3SSteven Rostedt {
1217b448c4e3SSteven Rostedt struct hlist_head *hhd;
1218b67bfe0dSSasha Levin struct hlist_node *tn;
1219b448c4e3SSteven Rostedt struct ftrace_func_entry *entry;
1220b448c4e3SSteven Rostedt int size = 1 << hash->size_bits;
1221b448c4e3SSteven Rostedt int i;
1222b448c4e3SSteven Rostedt
122333dc9b12SSteven Rostedt if (!hash->count)
122433dc9b12SSteven Rostedt return;
122533dc9b12SSteven Rostedt
1226b448c4e3SSteven Rostedt for (i = 0; i < size; i++) {
1227b448c4e3SSteven Rostedt hhd = &hash->buckets[i];
1228b67bfe0dSSasha Levin hlist_for_each_entry_safe(entry, tn, hhd, hlist)
122933dc9b12SSteven Rostedt free_hash_entry(hash, entry);
1230b448c4e3SSteven Rostedt }
1231b448c4e3SSteven Rostedt FTRACE_WARN_ON(hash->count);
1232b448c4e3SSteven Rostedt }
1233b448c4e3SSteven Rostedt
free_ftrace_mod(struct ftrace_mod_load * ftrace_mod)1234673feb9dSSteven Rostedt (VMware) static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1235673feb9dSSteven Rostedt (VMware) {
1236673feb9dSSteven Rostedt (VMware) list_del(&ftrace_mod->list);
1237673feb9dSSteven Rostedt (VMware) kfree(ftrace_mod->module);
1238673feb9dSSteven Rostedt (VMware) kfree(ftrace_mod->func);
1239673feb9dSSteven Rostedt (VMware) kfree(ftrace_mod);
1240673feb9dSSteven Rostedt (VMware) }
1241673feb9dSSteven Rostedt (VMware)
clear_ftrace_mod_list(struct list_head * head)1242673feb9dSSteven Rostedt (VMware) static void clear_ftrace_mod_list(struct list_head *head)
1243673feb9dSSteven Rostedt (VMware) {
1244673feb9dSSteven Rostedt (VMware) struct ftrace_mod_load *p, *n;
1245673feb9dSSteven Rostedt (VMware)
1246673feb9dSSteven Rostedt (VMware) /* stack tracer isn't supported yet */
1247673feb9dSSteven Rostedt (VMware) if (!head)
1248673feb9dSSteven Rostedt (VMware) return;
1249673feb9dSSteven Rostedt (VMware)
1250673feb9dSSteven Rostedt (VMware) mutex_lock(&ftrace_lock);
1251673feb9dSSteven Rostedt (VMware) list_for_each_entry_safe(p, n, head, list)
1252673feb9dSSteven Rostedt (VMware) free_ftrace_mod(p);
1253673feb9dSSteven Rostedt (VMware) mutex_unlock(&ftrace_lock);
1254673feb9dSSteven Rostedt (VMware) }
1255673feb9dSSteven Rostedt (VMware)
free_ftrace_hash(struct ftrace_hash * hash)125633dc9b12SSteven Rostedt static void free_ftrace_hash(struct ftrace_hash *hash)
125733dc9b12SSteven Rostedt {
125833dc9b12SSteven Rostedt if (!hash || hash == EMPTY_HASH)
125933dc9b12SSteven Rostedt return;
126033dc9b12SSteven Rostedt ftrace_hash_clear(hash);
126133dc9b12SSteven Rostedt kfree(hash->buckets);
126233dc9b12SSteven Rostedt kfree(hash);
126333dc9b12SSteven Rostedt }
126433dc9b12SSteven Rostedt
__free_ftrace_hash_rcu(struct rcu_head * rcu)126507fd5515SSteven Rostedt static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
126607fd5515SSteven Rostedt {
126707fd5515SSteven Rostedt struct ftrace_hash *hash;
126807fd5515SSteven Rostedt
126907fd5515SSteven Rostedt hash = container_of(rcu, struct ftrace_hash, rcu);
127007fd5515SSteven Rostedt free_ftrace_hash(hash);
127107fd5515SSteven Rostedt }
127207fd5515SSteven Rostedt
free_ftrace_hash_rcu(struct ftrace_hash * hash)127307fd5515SSteven Rostedt static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
127407fd5515SSteven Rostedt {
127507fd5515SSteven Rostedt if (!hash || hash == EMPTY_HASH)
127607fd5515SSteven Rostedt return;
127774401729SPaul E. McKenney call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
127807fd5515SSteven Rostedt }
127907fd5515SSteven Rostedt
/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops: the ops to remove the filters from
 *
 * Frees both the filter and notrace hashes of @ops. Note that the
 * hashes are freed immediately (free_ftrace_hash()), not via
 * call_rcu() as free_ftrace_hash_rcu() would do.
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);
12915500fa51SJiri Olsa
alloc_ftrace_hash(int size_bits)129233dc9b12SSteven Rostedt static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
129333dc9b12SSteven Rostedt {
129433dc9b12SSteven Rostedt struct ftrace_hash *hash;
129533dc9b12SSteven Rostedt int size;
129633dc9b12SSteven Rostedt
129733dc9b12SSteven Rostedt hash = kzalloc(sizeof(*hash), GFP_KERNEL);
129833dc9b12SSteven Rostedt if (!hash)
129933dc9b12SSteven Rostedt return NULL;
130033dc9b12SSteven Rostedt
130133dc9b12SSteven Rostedt size = 1 << size_bits;
130247b0edcbSThomas Meyer hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
130333dc9b12SSteven Rostedt
130433dc9b12SSteven Rostedt if (!hash->buckets) {
130533dc9b12SSteven Rostedt kfree(hash);
130633dc9b12SSteven Rostedt return NULL;
130733dc9b12SSteven Rostedt }
130833dc9b12SSteven Rostedt
130933dc9b12SSteven Rostedt hash->size_bits = size_bits;
131033dc9b12SSteven Rostedt
131133dc9b12SSteven Rostedt return hash;
131233dc9b12SSteven Rostedt }
131333dc9b12SSteven Rostedt
1314673feb9dSSteven Rostedt (VMware)
ftrace_add_mod(struct trace_array * tr,const char * func,const char * module,int enable)1315673feb9dSSteven Rostedt (VMware) static int ftrace_add_mod(struct trace_array *tr,
1316673feb9dSSteven Rostedt (VMware) const char *func, const char *module,
1317673feb9dSSteven Rostedt (VMware) int enable)
1318673feb9dSSteven Rostedt (VMware) {
1319673feb9dSSteven Rostedt (VMware) struct ftrace_mod_load *ftrace_mod;
1320673feb9dSSteven Rostedt (VMware) struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1321673feb9dSSteven Rostedt (VMware)
1322673feb9dSSteven Rostedt (VMware) ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1323673feb9dSSteven Rostedt (VMware) if (!ftrace_mod)
1324673feb9dSSteven Rostedt (VMware) return -ENOMEM;
1325673feb9dSSteven Rostedt (VMware)
132619ba6c8aSXiu Jianfeng INIT_LIST_HEAD(&ftrace_mod->list);
1327673feb9dSSteven Rostedt (VMware) ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1328673feb9dSSteven Rostedt (VMware) ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1329673feb9dSSteven Rostedt (VMware) ftrace_mod->enable = enable;
1330673feb9dSSteven Rostedt (VMware)
1331673feb9dSSteven Rostedt (VMware) if (!ftrace_mod->func || !ftrace_mod->module)
1332673feb9dSSteven Rostedt (VMware) goto out_free;
1333673feb9dSSteven Rostedt (VMware)
1334673feb9dSSteven Rostedt (VMware) list_add(&ftrace_mod->list, mod_head);
1335673feb9dSSteven Rostedt (VMware)
1336673feb9dSSteven Rostedt (VMware) return 0;
1337673feb9dSSteven Rostedt (VMware)
1338673feb9dSSteven Rostedt (VMware) out_free:
1339673feb9dSSteven Rostedt (VMware) free_ftrace_mod(ftrace_mod);
1340673feb9dSSteven Rostedt (VMware)
1341673feb9dSSteven Rostedt (VMware) return -ENOMEM;
1342673feb9dSSteven Rostedt (VMware) }
1343673feb9dSSteven Rostedt (VMware)
134433dc9b12SSteven Rostedt static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits,struct ftrace_hash * hash)134533dc9b12SSteven Rostedt alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
134633dc9b12SSteven Rostedt {
134733dc9b12SSteven Rostedt struct ftrace_func_entry *entry;
134833dc9b12SSteven Rostedt struct ftrace_hash *new_hash;
134933dc9b12SSteven Rostedt int size;
135033dc9b12SSteven Rostedt int i;
135133dc9b12SSteven Rostedt
135233dc9b12SSteven Rostedt new_hash = alloc_ftrace_hash(size_bits);
135333dc9b12SSteven Rostedt if (!new_hash)
135433dc9b12SSteven Rostedt return NULL;
135533dc9b12SSteven Rostedt
13568c08f0d5SSteven Rostedt (VMware) if (hash)
13578c08f0d5SSteven Rostedt (VMware) new_hash->flags = hash->flags;
13588c08f0d5SSteven Rostedt (VMware)
135933dc9b12SSteven Rostedt /* Empty hash? */
136006a51d93SSteven Rostedt if (ftrace_hash_empty(hash))
136133dc9b12SSteven Rostedt return new_hash;
136233dc9b12SSteven Rostedt
136333dc9b12SSteven Rostedt size = 1 << hash->size_bits;
136433dc9b12SSteven Rostedt for (i = 0; i < size; i++) {
1365b67bfe0dSSasha Levin hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1366a12754a8SSteven Rostedt (Google) if (add_hash_entry(new_hash, entry->ip) == NULL)
136733dc9b12SSteven Rostedt goto free_hash;
136833dc9b12SSteven Rostedt }
136933dc9b12SSteven Rostedt }
137033dc9b12SSteven Rostedt
137133dc9b12SSteven Rostedt FTRACE_WARN_ON(new_hash->count != hash->count);
137233dc9b12SSteven Rostedt
137333dc9b12SSteven Rostedt return new_hash;
137433dc9b12SSteven Rostedt
137533dc9b12SSteven Rostedt free_hash:
137633dc9b12SSteven Rostedt free_ftrace_hash(new_hash);
137733dc9b12SSteven Rostedt return NULL;
137833dc9b12SSteven Rostedt }
137933dc9b12SSteven Rostedt
138041fb61c2SSteven Rostedt static void
138184261912SSteven Rostedt (Red Hat) ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
138241fb61c2SSteven Rostedt static void
138384261912SSteven Rostedt (Red Hat) ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
138441fb61c2SSteven Rostedt
1385f8b8be8aSMasami Hiramatsu static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1386f8b8be8aSMasami Hiramatsu struct ftrace_hash *new_hash);
1387f8b8be8aSMasami Hiramatsu
/*
 * Create a hash sized for roughly @size entries and *move* every entry
 * of @src into it. On success @src is left empty (its entries now live
 * in the returned hash). Returns NULL if allocation fails, in which
 * case @src is untouched.
 */
static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	/* Transfer (not copy) every entry from @src into the new hash. */
	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
14233e278c0dSNamhyung Kim
1424714641c3SSteven Rostedt (VMware) static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash * src)1425714641c3SSteven Rostedt (VMware) __ftrace_hash_move(struct ftrace_hash *src)
1426714641c3SSteven Rostedt (VMware) {
1427714641c3SSteven Rostedt (VMware) int size = src->count;
1428714641c3SSteven Rostedt (VMware)
1429714641c3SSteven Rostedt (VMware) /*
1430714641c3SSteven Rostedt (VMware) * If the new source is empty, just return the empty_hash.
1431714641c3SSteven Rostedt (VMware) */
1432714641c3SSteven Rostedt (VMware) if (ftrace_hash_empty(src))
1433714641c3SSteven Rostedt (VMware) return EMPTY_HASH;
1434714641c3SSteven Rostedt (VMware)
1435714641c3SSteven Rostedt (VMware) return dup_hash(src, size);
1436714641c3SSteven Rostedt (VMware) }
1437714641c3SSteven Rostedt (VMware)
/*
 * Replace the hash pointed to by @dst (a filter or notrace hash of @ops,
 * selected by @enable) with a compacted copy of @src. Record accounting
 * for the old hash is removed before the pointer swap and re-added for
 * the new one afterwards. Returns 0 on success, -EINVAL when trying to
 * set a notrace hash on an IPMODIFY ops, -ENOMEM on allocation failure,
 * or a negative error from the IPMODIFY update. On failure *dst is left
 * untouched.
 */
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}
147533dc9b12SSteven Rostedt
hash_contains_ip(unsigned long ip,struct ftrace_ops_hash * hash)1476fef5aeeeSSteven Rostedt (Red Hat) static bool hash_contains_ip(unsigned long ip,
1477fef5aeeeSSteven Rostedt (Red Hat) struct ftrace_ops_hash *hash)
1478fef5aeeeSSteven Rostedt (Red Hat) {
1479fef5aeeeSSteven Rostedt (Red Hat) /*
1480fef5aeeeSSteven Rostedt (Red Hat) * The function record is a match if it exists in the filter
1481fdda88d3SQiujun Huang * hash and not in the notrace hash. Note, an empty hash is
1482fef5aeeeSSteven Rostedt (Red Hat) * considered a match for the filter hash, but an empty
1483fef5aeeeSSteven Rostedt (Red Hat) * notrace hash is considered not in the notrace hash.
1484fef5aeeeSSteven Rostedt (Red Hat) */
1485fef5aeeeSSteven Rostedt (Red Hat) return (ftrace_hash_empty(hash->filter_hash) ||
14862b2c279cSSteven Rostedt (VMware) __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1487fef5aeeeSSteven Rostedt (Red Hat) (ftrace_hash_empty(hash->notrace_hash) ||
14882b2c279cSSteven Rostedt (VMware) !__ftrace_lookup_ip(hash->notrace_hash, ip));
1489fef5aeeeSSteven Rostedt (Red Hat) }
1490fef5aeeeSSteven Rostedt (Red Hat)
1491265c831cSSteven Rostedt /*
1492b848914cSSteven Rostedt * Test the hashes for this ops to see if we want to call
1493b848914cSSteven Rostedt * the ops->func or not.
1494b848914cSSteven Rostedt *
1495b848914cSSteven Rostedt * It's a match if the ip is in the ops->filter_hash or
1496b848914cSSteven Rostedt * the filter_hash does not exist or is empty,
1497b848914cSSteven Rostedt * AND
1498b848914cSSteven Rostedt * the ip is not in the ops->notrace_hash.
1499cdbe61bfSSteven Rostedt *
1500cdbe61bfSSteven Rostedt * This needs to be called with preemption disabled as
150174401729SPaul E. McKenney * the hashes are freed with call_rcu().
1502b848914cSSteven Rostedt */
15033306fc4aSSteven Rostedt (VMware) int
ftrace_ops_test(struct ftrace_ops * ops,unsigned long ip,void * regs)1504195a8afcSSteven Rostedt (Red Hat) ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1505b848914cSSteven Rostedt {
1506fef5aeeeSSteven Rostedt (Red Hat) struct ftrace_ops_hash hash;
1507b848914cSSteven Rostedt int ret;
1508b848914cSSteven Rostedt
1509195a8afcSSteven Rostedt (Red Hat) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1510195a8afcSSteven Rostedt (Red Hat) /*
1511195a8afcSSteven Rostedt (Red Hat) * There's a small race when adding ops that the ftrace handler
1512195a8afcSSteven Rostedt (Red Hat) * that wants regs, may be called without them. We can not
1513195a8afcSSteven Rostedt (Red Hat) * allow that handler to be called if regs is NULL.
1514195a8afcSSteven Rostedt (Red Hat) */
1515195a8afcSSteven Rostedt (Red Hat) if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1516195a8afcSSteven Rostedt (Red Hat) return 0;
1517195a8afcSSteven Rostedt (Red Hat) #endif
1518195a8afcSSteven Rostedt (Red Hat)
1519f86f4180SChunyan Zhang rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1520f86f4180SChunyan Zhang rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1521b848914cSSteven Rostedt
1522fef5aeeeSSteven Rostedt (Red Hat) if (hash_contains_ip(ip, &hash))
1523b848914cSSteven Rostedt ret = 1;
1524b848914cSSteven Rostedt else
1525b848914cSSteven Rostedt ret = 0;
1526b848914cSSteven Rostedt
1527b848914cSSteven Rostedt return ret;
1528b848914cSSteven Rostedt }
1529b848914cSSteven Rostedt
/*
 * Iterate over every dyn_ftrace record in every ftrace page, binding
 * each to @rec. This is a double for. Do not use 'break' to break out
 * of the loop, you must use a goto (break only exits the inner loop).
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

/* Closes the two loops opened by do_for_each_ftrace_rec(). */
#define while_for_each_ftrace_rec()		\
		}				\
	}
1543ecea656dSAbhishek Sagar
15445855feadSSteven Rostedt
/*
 * bsearch() comparison between a search key and a dyn_ftrace record.
 * The key overloads ->flags to hold the *end* address of the searched
 * range (see lookup_rec()), so a record matches (returns 0) when
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE) overlaps [key->ip, key->flags].
 */
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* key->flags is the end of the range being searched */
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
1556a650e02aSSteven Rostedt
/*
 * Find the dyn_ftrace record whose instruction range
 * [ip, ip + MCOUNT_INSN_SIZE) overlaps [start, end]. Each page is
 * skipped if its first/last records show the range cannot be inside it,
 * otherwise the page is binary searched (records within a page are
 * ordered by ip, as bsearch() requires). Returns NULL if no record
 * touches the range.
 */
static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		/* Skip pages whose records all lie outside [start, end]. */
		if (pg->index == 0 ||
		    end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}
15797e16f581SSteven Rostedt (VMware)
/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is a least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 * Returns 0 when no record overlaps the range.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	unsigned long ip = 0;

	/* The record pages are walked under RCU protection. */
	rcu_read_lock();
	rec = lookup_rec(start, end);
	if (rec)
		ip = rec->ip;
	rcu_read_unlock();

	return ip;
}
16055855feadSSteven Rostedt
/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	unsigned long loc;
	unsigned long offset;
	unsigned long size;

	/* Exact hit on a traced instruction? */
	loc = ftrace_location_range(ip, ip);
	if (loc)
		return loc;

	if (!kallsyms_lookup_size_offset(ip, &size, &offset))
		return 0;

	/* Only sym+0 is mapped to the symbol's ftrace location. */
	if (offset)
		return 0;

	/* map sym+0 to __fentry__ */
	return ftrace_location_range(ip, ip + size - 1);
}
1633c88fd863SSteven Rostedt
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	return ftrace_location_range((unsigned long)start,
				     (unsigned long)end) != 0;
}
1653c88fd863SSteven Rostedt
16544fbb48cbSSteven Rostedt (Red Hat) /* Test if ops registered to this rec needs regs */
test_rec_ops_needs_regs(struct dyn_ftrace * rec)16554fbb48cbSSteven Rostedt (Red Hat) static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
16564fbb48cbSSteven Rostedt (Red Hat) {
16574fbb48cbSSteven Rostedt (Red Hat) struct ftrace_ops *ops;
16584fbb48cbSSteven Rostedt (Red Hat) bool keep_regs = false;
16594fbb48cbSSteven Rostedt (Red Hat)
16604fbb48cbSSteven Rostedt (Red Hat) for (ops = ftrace_ops_list;
16614fbb48cbSSteven Rostedt (Red Hat) ops != &ftrace_list_end; ops = ops->next) {
16624fbb48cbSSteven Rostedt (Red Hat) /* pass rec in as regs to have non-NULL val */
16634fbb48cbSSteven Rostedt (Red Hat) if (ftrace_ops_test(ops, rec->ip, rec)) {
16644fbb48cbSSteven Rostedt (Red Hat) if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
16654fbb48cbSSteven Rostedt (Red Hat) keep_regs = true;
16664fbb48cbSSteven Rostedt (Red Hat) break;
16674fbb48cbSSteven Rostedt (Red Hat) }
16684fbb48cbSSteven Rostedt (Red Hat) }
16694fbb48cbSSteven Rostedt (Red Hat) }
16704fbb48cbSSteven Rostedt (Red Hat)
16714fbb48cbSSteven Rostedt (Red Hat) return keep_regs;
16724fbb48cbSSteven Rostedt (Red Hat) }
16734fbb48cbSSteven Rostedt (Red Hat)
1674a124692bSCheng Jian static struct ftrace_ops *
1675a124692bSCheng Jian ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1676a124692bSCheng Jian static struct ftrace_ops *
16774c75b0ffSNaveen N. Rao ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
16784c75b0ffSNaveen N. Rao static struct ftrace_ops *
1679a124692bSCheng Jian ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1680a124692bSCheng Jian
skip_record(struct dyn_ftrace * rec)1681cf04f2d5SSteven Rostedt (Google) static bool skip_record(struct dyn_ftrace *rec)
1682cf04f2d5SSteven Rostedt (Google) {
1683cf04f2d5SSteven Rostedt (Google) /*
1684cf04f2d5SSteven Rostedt (Google) * At boot up, weak functions are set to disable. Function tracing
1685cf04f2d5SSteven Rostedt (Google) * can be enabled before they are, and they still need to be disabled now.
1686cf04f2d5SSteven Rostedt (Google) * If the record is disabled, still continue if it is marked as already
1687cf04f2d5SSteven Rostedt (Google) * enabled (this is needed to keep the accounting working).
1688cf04f2d5SSteven Rostedt (Google) */
1689cf04f2d5SSteven Rostedt (Google) return rec->flags & FTRACE_FL_DISABLED &&
1690cf04f2d5SSteven Rostedt (Google) !(rec->flags & FTRACE_FL_ENABLED);
1691cf04f2d5SSteven Rostedt (Google) }
1692cf04f2d5SSteven Rostedt (Google)
__ftrace_hash_rec_update(struct ftrace_ops * ops,int filter_hash,bool inc)169384b6d3e6SJiri Olsa static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1694ed926f9bSSteven Rostedt int filter_hash,
1695ed926f9bSSteven Rostedt bool inc)
1696ed926f9bSSteven Rostedt {
1697ed926f9bSSteven Rostedt struct ftrace_hash *hash;
1698ed926f9bSSteven Rostedt struct ftrace_hash *other_hash;
1699ed926f9bSSteven Rostedt struct ftrace_page *pg;
1700ed926f9bSSteven Rostedt struct dyn_ftrace *rec;
170184b6d3e6SJiri Olsa bool update = false;
1702ed926f9bSSteven Rostedt int count = 0;
17038c08f0d5SSteven Rostedt (VMware) int all = false;
1704ed926f9bSSteven Rostedt
1705ed926f9bSSteven Rostedt /* Only update if the ops has been registered */
1706ed926f9bSSteven Rostedt if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
170784b6d3e6SJiri Olsa return false;
1708ed926f9bSSteven Rostedt
1709ed926f9bSSteven Rostedt /*
1710ed926f9bSSteven Rostedt * In the filter_hash case:
1711ed926f9bSSteven Rostedt * If the count is zero, we update all records.
1712ed926f9bSSteven Rostedt * Otherwise we just update the items in the hash.
1713ed926f9bSSteven Rostedt *
1714ed926f9bSSteven Rostedt * In the notrace_hash case:
1715ed926f9bSSteven Rostedt * We enable the update in the hash.
1716ed926f9bSSteven Rostedt * As disabling notrace means enabling the tracing,
1717ed926f9bSSteven Rostedt * and enabling notrace means disabling, the inc variable
1718ed926f9bSSteven Rostedt * gets inversed.
1719ed926f9bSSteven Rostedt */
1720ed926f9bSSteven Rostedt if (filter_hash) {
172133b7f99cSSteven Rostedt (Red Hat) hash = ops->func_hash->filter_hash;
172233b7f99cSSteven Rostedt (Red Hat) other_hash = ops->func_hash->notrace_hash;
172306a51d93SSteven Rostedt if (ftrace_hash_empty(hash))
17248c08f0d5SSteven Rostedt (VMware) all = true;
1725ed926f9bSSteven Rostedt } else {
1726ed926f9bSSteven Rostedt inc = !inc;
172733b7f99cSSteven Rostedt (Red Hat) hash = ops->func_hash->notrace_hash;
172833b7f99cSSteven Rostedt (Red Hat) other_hash = ops->func_hash->filter_hash;
1729ed926f9bSSteven Rostedt /*
1730ed926f9bSSteven Rostedt * If the notrace hash has no items,
1731ed926f9bSSteven Rostedt * then there's nothing to do.
1732ed926f9bSSteven Rostedt */
173306a51d93SSteven Rostedt if (ftrace_hash_empty(hash))
173484b6d3e6SJiri Olsa return false;
1735ed926f9bSSteven Rostedt }
1736ed926f9bSSteven Rostedt
1737ed926f9bSSteven Rostedt do_for_each_ftrace_rec(pg, rec) {
1738ed926f9bSSteven Rostedt int in_other_hash = 0;
1739ed926f9bSSteven Rostedt int in_hash = 0;
1740ed926f9bSSteven Rostedt int match = 0;
1741ed926f9bSSteven Rostedt
1742cf04f2d5SSteven Rostedt (Google) if (skip_record(rec))
1743b7ffffbbSSteven Rostedt (Red Hat) continue;
1744b7ffffbbSSteven Rostedt (Red Hat)
1745ed926f9bSSteven Rostedt if (all) {
1746ed926f9bSSteven Rostedt /*
1747ed926f9bSSteven Rostedt * Only the filter_hash affects all records.
1748ed926f9bSSteven Rostedt * Update if the record is not in the notrace hash.
1749ed926f9bSSteven Rostedt */
1750b848914cSSteven Rostedt if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1751ed926f9bSSteven Rostedt match = 1;
1752ed926f9bSSteven Rostedt } else {
175306a51d93SSteven Rostedt in_hash = !!ftrace_lookup_ip(hash, rec->ip);
175406a51d93SSteven Rostedt in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1755ed926f9bSSteven Rostedt
1756ed926f9bSSteven Rostedt /*
175719eab4a4SSteven Rostedt (Red Hat) * If filter_hash is set, we want to match all functions
175819eab4a4SSteven Rostedt (Red Hat) * that are in the hash but not in the other hash.
1759ed926f9bSSteven Rostedt *
176019eab4a4SSteven Rostedt (Red Hat) * If filter_hash is not set, then we are decrementing.
176119eab4a4SSteven Rostedt (Red Hat) * That means we match anything that is in the hash
176219eab4a4SSteven Rostedt (Red Hat) * and also in the other_hash. That is, we need to turn
176319eab4a4SSteven Rostedt (Red Hat) * off functions in the other hash because they are disabled
176419eab4a4SSteven Rostedt (Red Hat) * by this hash.
1765ed926f9bSSteven Rostedt */
1766ed926f9bSSteven Rostedt if (filter_hash && in_hash && !in_other_hash)
1767ed926f9bSSteven Rostedt match = 1;
1768ed926f9bSSteven Rostedt else if (!filter_hash && in_hash &&
176906a51d93SSteven Rostedt (in_other_hash || ftrace_hash_empty(other_hash)))
1770ed926f9bSSteven Rostedt match = 1;
1771ed926f9bSSteven Rostedt }
1772ed926f9bSSteven Rostedt if (!match)
1773ed926f9bSSteven Rostedt continue;
1774ed926f9bSSteven Rostedt
1775ed926f9bSSteven Rostedt if (inc) {
1776ed926f9bSSteven Rostedt rec->flags++;
17770376bde1SSteven Rostedt (Red Hat) if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
177884b6d3e6SJiri Olsa return false;
177979922b80SSteven Rostedt (Red Hat)
1780763e34e7SSteven Rostedt (VMware) if (ops->flags & FTRACE_OPS_FL_DIRECT)
1781763e34e7SSteven Rostedt (VMware) rec->flags |= FTRACE_FL_DIRECT;
1782763e34e7SSteven Rostedt (VMware)
178379922b80SSteven Rostedt (Red Hat) /*
178479922b80SSteven Rostedt (Red Hat) * If there's only a single callback registered to a
178579922b80SSteven Rostedt (Red Hat) * function, and the ops has a trampoline registered
178679922b80SSteven Rostedt (Red Hat) * for it, then we can call it directly.
178779922b80SSteven Rostedt (Red Hat) */
1788fef5aeeeSSteven Rostedt (Red Hat) if (ftrace_rec_count(rec) == 1 && ops->trampoline)
178979922b80SSteven Rostedt (Red Hat) rec->flags |= FTRACE_FL_TRAMP;
1790fef5aeeeSSteven Rostedt (Red Hat) else
179179922b80SSteven Rostedt (Red Hat) /*
179279922b80SSteven Rostedt (Red Hat) * If we are adding another function callback
179379922b80SSteven Rostedt (Red Hat) * to this function, and the previous had a
1794bce0b6c5SSteven Rostedt (Red Hat) * custom trampoline in use, then we need to go
1795bce0b6c5SSteven Rostedt (Red Hat) * back to the default trampoline.
179679922b80SSteven Rostedt (Red Hat) */
1797fef5aeeeSSteven Rostedt (Red Hat) rec->flags &= ~FTRACE_FL_TRAMP;
179879922b80SSteven Rostedt (Red Hat)
179908f6fba5SSteven Rostedt /*
180008f6fba5SSteven Rostedt * If any ops wants regs saved for this function
180108f6fba5SSteven Rostedt * then all ops will get saved regs.
180208f6fba5SSteven Rostedt */
180308f6fba5SSteven Rostedt if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
180408f6fba5SSteven Rostedt rec->flags |= FTRACE_FL_REGS;
1805ed926f9bSSteven Rostedt } else {
18060376bde1SSteven Rostedt (Red Hat) if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
180784b6d3e6SJiri Olsa return false;
1808ed926f9bSSteven Rostedt rec->flags--;
180979922b80SSteven Rostedt (Red Hat)
18104fbb48cbSSteven Rostedt (Red Hat) /*
1811763e34e7SSteven Rostedt (VMware) * Only the internal direct_ops should have the
1812763e34e7SSteven Rostedt (VMware) * DIRECT flag set. Thus, if it is removing a
1813763e34e7SSteven Rostedt (VMware) * function, then that function should no longer
1814763e34e7SSteven Rostedt (VMware) * be direct.
1815763e34e7SSteven Rostedt (VMware) */
1816763e34e7SSteven Rostedt (VMware) if (ops->flags & FTRACE_OPS_FL_DIRECT)
1817763e34e7SSteven Rostedt (VMware) rec->flags &= ~FTRACE_FL_DIRECT;
1818763e34e7SSteven Rostedt (VMware)
1819763e34e7SSteven Rostedt (VMware) /*
18204fbb48cbSSteven Rostedt (Red Hat) * If the rec had REGS enabled and the ops that is
18214fbb48cbSSteven Rostedt (Red Hat) * being removed had REGS set, then see if there is
18224fbb48cbSSteven Rostedt (Red Hat) * still any ops for this record that wants regs.
18234fbb48cbSSteven Rostedt (Red Hat) * If not, we can stop recording them.
18244fbb48cbSSteven Rostedt (Red Hat) */
18250376bde1SSteven Rostedt (Red Hat) if (ftrace_rec_count(rec) > 0 &&
18264fbb48cbSSteven Rostedt (Red Hat) rec->flags & FTRACE_FL_REGS &&
18274fbb48cbSSteven Rostedt (Red Hat) ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
18284fbb48cbSSteven Rostedt (Red Hat) if (!test_rec_ops_needs_regs(rec))
18294fbb48cbSSteven Rostedt (Red Hat) rec->flags &= ~FTRACE_FL_REGS;
18304fbb48cbSSteven Rostedt (Red Hat) }
183179922b80SSteven Rostedt (Red Hat)
183279922b80SSteven Rostedt (Red Hat) /*
1833a124692bSCheng Jian * The TRAMP needs to be set only if rec count
1834a124692bSCheng Jian * is decremented to one, and the ops that is
1835a124692bSCheng Jian * left has a trampoline. As TRAMP can only be
1836a124692bSCheng Jian * enabled if there is only a single ops attached
1837a124692bSCheng Jian * to it.
1838fef5aeeeSSteven Rostedt (Red Hat) */
1839a124692bSCheng Jian if (ftrace_rec_count(rec) == 1 &&
18404c75b0ffSNaveen N. Rao ftrace_find_tramp_ops_any_other(rec, ops))
1841a124692bSCheng Jian rec->flags |= FTRACE_FL_TRAMP;
1842a124692bSCheng Jian else
1843fef5aeeeSSteven Rostedt (Red Hat) rec->flags &= ~FTRACE_FL_TRAMP;
1844fef5aeeeSSteven Rostedt (Red Hat)
1845fef5aeeeSSteven Rostedt (Red Hat) /*
184679922b80SSteven Rostedt (Red Hat) * flags will be cleared in ftrace_check_record()
184779922b80SSteven Rostedt (Red Hat) * if rec count is zero.
184879922b80SSteven Rostedt (Red Hat) */
1849ed926f9bSSteven Rostedt }
1850cbad0fb2SMark Rutland
1851cbad0fb2SMark Rutland /*
1852cbad0fb2SMark Rutland * If the rec has a single associated ops, and ops->func can be
1853cbad0fb2SMark Rutland * called directly, allow the call site to call via the ops.
1854cbad0fb2SMark Rutland */
1855cbad0fb2SMark Rutland if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
1856cbad0fb2SMark Rutland ftrace_rec_count(rec) == 1 &&
1857cbad0fb2SMark Rutland ftrace_ops_get_func(ops) == ops->func)
1858cbad0fb2SMark Rutland rec->flags |= FTRACE_FL_CALL_OPS;
1859cbad0fb2SMark Rutland else
1860cbad0fb2SMark Rutland rec->flags &= ~FTRACE_FL_CALL_OPS;
1861cbad0fb2SMark Rutland
1862ed926f9bSSteven Rostedt count++;
186384b6d3e6SJiri Olsa
186484b6d3e6SJiri Olsa /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
18657375dca1SSteven Rostedt (VMware) update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
186684b6d3e6SJiri Olsa
1867ed926f9bSSteven Rostedt /* Shortcut, if we handled all records, we are done. */
1868ed926f9bSSteven Rostedt if (!all && count == hash->count)
186984b6d3e6SJiri Olsa return update;
1870ed926f9bSSteven Rostedt } while_for_each_ftrace_rec();
187184b6d3e6SJiri Olsa
187284b6d3e6SJiri Olsa return update;
1873ed926f9bSSteven Rostedt }
1874ed926f9bSSteven Rostedt
ftrace_hash_rec_disable(struct ftrace_ops * ops,int filter_hash)187584b6d3e6SJiri Olsa static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1876ed926f9bSSteven Rostedt int filter_hash)
1877ed926f9bSSteven Rostedt {
187884b6d3e6SJiri Olsa return __ftrace_hash_rec_update(ops, filter_hash, 0);
1879ed926f9bSSteven Rostedt }
1880ed926f9bSSteven Rostedt
ftrace_hash_rec_enable(struct ftrace_ops * ops,int filter_hash)188184b6d3e6SJiri Olsa static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1882ed926f9bSSteven Rostedt int filter_hash)
1883ed926f9bSSteven Rostedt {
188484b6d3e6SJiri Olsa return __ftrace_hash_rec_update(ops, filter_hash, 1);
1885ed926f9bSSteven Rostedt }
1886ed926f9bSSteven Rostedt
ftrace_hash_rec_update_modify(struct ftrace_ops * ops,int filter_hash,int inc)188784261912SSteven Rostedt (Red Hat) static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
188884261912SSteven Rostedt (Red Hat) int filter_hash, int inc)
188984261912SSteven Rostedt (Red Hat) {
189084261912SSteven Rostedt (Red Hat) struct ftrace_ops *op;
189184261912SSteven Rostedt (Red Hat)
189284261912SSteven Rostedt (Red Hat) __ftrace_hash_rec_update(ops, filter_hash, inc);
189384261912SSteven Rostedt (Red Hat)
189484261912SSteven Rostedt (Red Hat) if (ops->func_hash != &global_ops.local_hash)
189584261912SSteven Rostedt (Red Hat) return;
189684261912SSteven Rostedt (Red Hat)
189784261912SSteven Rostedt (Red Hat) /*
189884261912SSteven Rostedt (Red Hat) * If the ops shares the global_ops hash, then we need to update
189984261912SSteven Rostedt (Red Hat) * all ops that are enabled and use this hash.
190084261912SSteven Rostedt (Red Hat) */
190184261912SSteven Rostedt (Red Hat) do_for_each_ftrace_op(op, ftrace_ops_list) {
190284261912SSteven Rostedt (Red Hat) /* Already done */
190384261912SSteven Rostedt (Red Hat) if (op == ops)
190484261912SSteven Rostedt (Red Hat) continue;
190584261912SSteven Rostedt (Red Hat) if (op->func_hash == &global_ops.local_hash)
190684261912SSteven Rostedt (Red Hat) __ftrace_hash_rec_update(op, filter_hash, inc);
190784261912SSteven Rostedt (Red Hat) } while_for_each_ftrace_op(op);
190884261912SSteven Rostedt (Red Hat) }
190984261912SSteven Rostedt (Red Hat)
/* Decrement (disable) variant of ftrace_hash_rec_update_modify(). */
static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}
191584261912SSteven Rostedt (Red Hat)
/* Increment (enable) variant of ftrace_hash_rec_update_modify(). */
static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
192184261912SSteven Rostedt (Red Hat)
/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash has below meanings
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 *
 * DIRECT ops does not have IPMODIFY flag, but we still need to check it
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;	/* end: first rec NOT updated, for rollback */
	int in_old, in_new;
	bool is_ipmodify, is_direct;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;

	/* neither IPMODIFY nor DIRECT, skip */
	if (!is_ipmodify && !is_direct)
		return 0;

	/* An ops is expected to be either IPMODIFY or DIRECT, never both. */
	if (WARN_ON_ONCE(is_ipmodify && is_direct))
		return 0;

	/*
	 * Since the IPMODIFY and DIRECT are very address sensitive
	 * actions, we do not allow ftrace_ops to set all functions to new
	 * hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			if (rec->flags & FTRACE_FL_IPMODIFY) {
				int ret;

				/* Cannot have two ipmodify on same rec */
				if (is_ipmodify)
					goto rollback;

				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);

				/*
				 * Another ops with IPMODIFY is already
				 * attached. We are now attaching a direct
				 * ops. Run SHARE_IPMODIFY_SELF, to check
				 * whether sharing is supported.
				 */
				if (!ops->ops_func)
					return -EBUSY;
				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
				if (ret)
					return ret;
			} else if (is_ipmodify) {
				rec->flags |= FTRACE_FL_IPMODIFY;
			}
		} else if (is_ipmodify) {
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		}
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* Stop at the record that caused the conflict; it was not changed. */
		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		/* Undo the flag change made in the first pass. */
		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}
2038f8b8be8aSMasami Hiramatsu
ftrace_hash_ipmodify_enable(struct ftrace_ops * ops)2039f8b8be8aSMasami Hiramatsu static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2040f8b8be8aSMasami Hiramatsu {
2041f8b8be8aSMasami Hiramatsu struct ftrace_hash *hash = ops->func_hash->filter_hash;
2042f8b8be8aSMasami Hiramatsu
2043f8b8be8aSMasami Hiramatsu if (ftrace_hash_empty(hash))
2044f8b8be8aSMasami Hiramatsu hash = NULL;
2045f8b8be8aSMasami Hiramatsu
2046f8b8be8aSMasami Hiramatsu return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2047f8b8be8aSMasami Hiramatsu }
2048f8b8be8aSMasami Hiramatsu
2049f8b8be8aSMasami Hiramatsu /* Disabling always succeeds */
ftrace_hash_ipmodify_disable(struct ftrace_ops * ops)2050f8b8be8aSMasami Hiramatsu static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2051f8b8be8aSMasami Hiramatsu {
2052f8b8be8aSMasami Hiramatsu struct ftrace_hash *hash = ops->func_hash->filter_hash;
2053f8b8be8aSMasami Hiramatsu
2054f8b8be8aSMasami Hiramatsu if (ftrace_hash_empty(hash))
2055f8b8be8aSMasami Hiramatsu hash = NULL;
2056f8b8be8aSMasami Hiramatsu
2057f8b8be8aSMasami Hiramatsu __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2058f8b8be8aSMasami Hiramatsu }
2059f8b8be8aSMasami Hiramatsu
ftrace_hash_ipmodify_update(struct ftrace_ops * ops,struct ftrace_hash * new_hash)2060f8b8be8aSMasami Hiramatsu static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2061f8b8be8aSMasami Hiramatsu struct ftrace_hash *new_hash)
2062f8b8be8aSMasami Hiramatsu {
2063f8b8be8aSMasami Hiramatsu struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2064f8b8be8aSMasami Hiramatsu
2065f8b8be8aSMasami Hiramatsu if (ftrace_hash_empty(old_hash))
2066f8b8be8aSMasami Hiramatsu old_hash = NULL;
2067f8b8be8aSMasami Hiramatsu
2068f8b8be8aSMasami Hiramatsu if (ftrace_hash_empty(new_hash))
2069f8b8be8aSMasami Hiramatsu new_hash = NULL;
2070f8b8be8aSMasami Hiramatsu
2071f8b8be8aSMasami Hiramatsu return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2072f8b8be8aSMasami Hiramatsu }
2073f8b8be8aSMasami Hiramatsu
print_ip_ins(const char * fmt,const unsigned char * p)2074b05086c7SSteven Rostedt (Red Hat) static void print_ip_ins(const char *fmt, const unsigned char *p)
207505736a42SSteven Rostedt {
20766c14133dSSteven Rostedt (VMware) char ins[MCOUNT_INSN_SIZE];
207705736a42SSteven Rostedt
20786c14133dSSteven Rostedt (VMware) if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
20796c14133dSSteven Rostedt (VMware) printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
20806c14133dSSteven Rostedt (VMware) return;
20816c14133dSSteven Rostedt (VMware) }
20826c14133dSSteven Rostedt (VMware)
208305736a42SSteven Rostedt printk(KERN_CONT "%s", fmt);
208430f7d1caSZheng Yejian pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
208505736a42SSteven Rostedt }
208605736a42SSteven Rostedt
/* Records what kind of update failed; reported by print_bug_type(). */
enum ftrace_bug_type ftrace_bug_type;
/* Bytes an arch update expected to find at the call site; NULL if unset. */
const void *ftrace_expected;
208902a392a0SSteven Rostedt (Red Hat)
print_bug_type(void)209002a392a0SSteven Rostedt (Red Hat) static void print_bug_type(void)
209102a392a0SSteven Rostedt (Red Hat) {
209202a392a0SSteven Rostedt (Red Hat) switch (ftrace_bug_type) {
209302a392a0SSteven Rostedt (Red Hat) case FTRACE_BUG_UNKNOWN:
209402a392a0SSteven Rostedt (Red Hat) break;
209502a392a0SSteven Rostedt (Red Hat) case FTRACE_BUG_INIT:
209602a392a0SSteven Rostedt (Red Hat) pr_info("Initializing ftrace call sites\n");
209702a392a0SSteven Rostedt (Red Hat) break;
209802a392a0SSteven Rostedt (Red Hat) case FTRACE_BUG_NOP:
209902a392a0SSteven Rostedt (Red Hat) pr_info("Setting ftrace call site to NOP\n");
210002a392a0SSteven Rostedt (Red Hat) break;
210102a392a0SSteven Rostedt (Red Hat) case FTRACE_BUG_CALL:
210202a392a0SSteven Rostedt (Red Hat) pr_info("Setting ftrace call site to call ftrace function\n");
210302a392a0SSteven Rostedt (Red Hat) break;
210402a392a0SSteven Rostedt (Red Hat) case FTRACE_BUG_UPDATE:
210502a392a0SSteven Rostedt (Red Hat) pr_info("Updating ftrace call site to call a different ftrace function\n");
210602a392a0SSteven Rostedt (Red Hat) break;
210702a392a0SSteven Rostedt (Red Hat) }
210802a392a0SSteven Rostedt (Red Hat) }
210902a392a0SSteven Rostedt (Red Hat)
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	/* @rec may be NULL when the failing record is not known. */
	unsigned long ip = rec ? rec->ip : 0;

	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		/* Dump both what is at the site and what was expected there. */
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		/* Decode the record: ref count, then R (save-regs) and O (call-ops). */
		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : " ",
			rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			/* Walk every ops with a trampoline attached to this record. */
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}

	FTRACE_WARN_ON_ONCE(1);
}
217805736a42SSteven Rostedt
/*
 * Decide what (if anything) must be done to @rec's call site, and when
 * @update is true also move the record's *_EN state flags to match.
 * Returns one of FTRACE_UPDATE_{IGNORE,MAKE_CALL,MODIFY_CALL,MAKE_NOP},
 * setting ftrace_bug_type accordingly for error reporting.
 */
static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (skip_record(rec))
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		/* The !=/! dance compares the truth values of the two bits. */
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for direct, if the
		 * DIRECT and DIRECT_EN do not match, but only
		 * if the count is 1. That's because, if the
		 * count is something other than one, we do not
		 * want the direct enabled (it will be done via the
		 * direct helper). But if DIRECT_EN is set, and
		 * the count is not one, we need to clear it.
		 *
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_DIRECT) !=
			    !(rec->flags & FTRACE_FL_DIRECT_EN))
				flag |= FTRACE_FL_DIRECT;
		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
			flag |= FTRACE_FL_DIRECT;
		}

		/*
		 * Ops calls are special, as count matters.
		 * As with direct calls, they must only be enabled when count
		 * is one, otherwise they'll be handled via the list ops.
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
			    !(rec->flags & FTRACE_FL_CALL_OPS_EN))
				flag |= FTRACE_FL_CALL_OPS;
		} else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
			flag |= FTRACE_FL_CALL_OPS;
		}
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
			/* Sync each *_EN flag to the corresponding wanted flag. */
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}

			/* Keep track of anything that modifies the function */
			if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
				rec->flags |= FTRACE_FL_MODIFIED;

			if (flag & FTRACE_FL_DIRECT) {
				/*
				 * If there's only one user (direct_ops helper)
				 * then we can call the direct function
				 * directly (no ftrace trampoline).
				 */
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_DIRECT)
						rec->flags |= FTRACE_FL_DIRECT_EN;
					else
						rec->flags &= ~FTRACE_FL_DIRECT_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one callback to the function.
					 */
					rec->flags &= ~FTRACE_FL_DIRECT_EN;
				}
			}

			if (flag & FTRACE_FL_CALL_OPS) {
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_CALL_OPS)
						rec->flags |= FTRACE_FL_CALL_OPS_EN;
					else
						rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one set of associated ops.
					 */
					rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
				}
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags &= FTRACE_NOCLEAR_FLAGS;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
					FTRACE_FL_CALL_OPS_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}
2347c88fd863SSteven Rostedt
/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to true if the record is tracing, false to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 *
 * Returns the FTRACE_UPDATE_* action chosen for @rec, and commits the
 * corresponding flag changes (update argument is true).
 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
	/* true: actually modify rec->flags, not just report the action */
	return ftrace_check_record(rec, enable, true);
}
2360c88fd863SSteven Rostedt
/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to true to check if enabled, false if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 *
 * Returns the FTRACE_UPDATE_* action that would be taken, without
 * modifying any record flags (update argument is false).
 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
	/* false: query only, leave rec->flags untouched */
	return ftrace_check_record(rec, enable, false);
}
2374c88fd863SSteven Rostedt
237579922b80SSteven Rostedt (Red Hat) static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace * rec)23765fecaa04SSteven Rostedt (Red Hat) ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
23775fecaa04SSteven Rostedt (Red Hat) {
23785fecaa04SSteven Rostedt (Red Hat) struct ftrace_ops *op;
2379fef5aeeeSSteven Rostedt (Red Hat) unsigned long ip = rec->ip;
23805fecaa04SSteven Rostedt (Red Hat)
23815fecaa04SSteven Rostedt (Red Hat) do_for_each_ftrace_op(op, ftrace_ops_list) {
23825fecaa04SSteven Rostedt (Red Hat)
23835fecaa04SSteven Rostedt (Red Hat) if (!op->trampoline)
23845fecaa04SSteven Rostedt (Red Hat) continue;
23855fecaa04SSteven Rostedt (Red Hat)
2386fef5aeeeSSteven Rostedt (Red Hat) if (hash_contains_ip(ip, op->func_hash))
23875fecaa04SSteven Rostedt (Red Hat) return op;
23885fecaa04SSteven Rostedt (Red Hat) } while_for_each_ftrace_op(op);
23895fecaa04SSteven Rostedt (Red Hat)
23905fecaa04SSteven Rostedt (Red Hat) return NULL;
23915fecaa04SSteven Rostedt (Red Hat) }
23925fecaa04SSteven Rostedt (Red Hat)
23935fecaa04SSteven Rostedt (Red Hat) static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace * rec,struct ftrace_ops * op_exclude)23944c75b0ffSNaveen N. Rao ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
23954c75b0ffSNaveen N. Rao {
23964c75b0ffSNaveen N. Rao struct ftrace_ops *op;
23974c75b0ffSNaveen N. Rao unsigned long ip = rec->ip;
23984c75b0ffSNaveen N. Rao
23994c75b0ffSNaveen N. Rao do_for_each_ftrace_op(op, ftrace_ops_list) {
24004c75b0ffSNaveen N. Rao
24014c75b0ffSNaveen N. Rao if (op == op_exclude || !op->trampoline)
24024c75b0ffSNaveen N. Rao continue;
24034c75b0ffSNaveen N. Rao
24044c75b0ffSNaveen N. Rao if (hash_contains_ip(ip, op->func_hash))
24054c75b0ffSNaveen N. Rao return op;
24064c75b0ffSNaveen N. Rao } while_for_each_ftrace_op(op);
24074c75b0ffSNaveen N. Rao
24084c75b0ffSNaveen N. Rao return NULL;
24094c75b0ffSNaveen N. Rao }
24104c75b0ffSNaveen N. Rao
24114c75b0ffSNaveen N. Rao static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace * rec,struct ftrace_ops * op)241239daa7b9SSteven Rostedt (Red Hat) ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
241339daa7b9SSteven Rostedt (Red Hat) struct ftrace_ops *op)
241439daa7b9SSteven Rostedt (Red Hat) {
241539daa7b9SSteven Rostedt (Red Hat) unsigned long ip = rec->ip;
241639daa7b9SSteven Rostedt (Red Hat)
241739daa7b9SSteven Rostedt (Red Hat) while_for_each_ftrace_op(op) {
241839daa7b9SSteven Rostedt (Red Hat)
241939daa7b9SSteven Rostedt (Red Hat) if (!op->trampoline)
242039daa7b9SSteven Rostedt (Red Hat) continue;
242139daa7b9SSteven Rostedt (Red Hat)
242239daa7b9SSteven Rostedt (Red Hat) if (hash_contains_ip(ip, op->func_hash))
242339daa7b9SSteven Rostedt (Red Hat) return op;
242439daa7b9SSteven Rostedt (Red Hat) }
242539daa7b9SSteven Rostedt (Red Hat)
242639daa7b9SSteven Rostedt (Red Hat) return NULL;
242739daa7b9SSteven Rostedt (Red Hat) }
242839daa7b9SSteven Rostedt (Red Hat)
/*
 * Find the ops whose trampoline this record is *currently* calling.
 * Unlike the "new" variant, this must account for ops that are in the
 * middle of being removed or modified, since the record's call site may
 * still point at their trampoline.
 */
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		/* Only ops with a trampoline can own this call site. */
		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;


		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}
249879922b80SSteven Rostedt (Red Hat)
249979922b80SSteven Rostedt (Red Hat) static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace * rec)250079922b80SSteven Rostedt (Red Hat) ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
250179922b80SSteven Rostedt (Red Hat) {
250279922b80SSteven Rostedt (Red Hat) struct ftrace_ops *op;
2503fef5aeeeSSteven Rostedt (Red Hat) unsigned long ip = rec->ip;
250479922b80SSteven Rostedt (Red Hat)
250579922b80SSteven Rostedt (Red Hat) do_for_each_ftrace_op(op, ftrace_ops_list) {
250679922b80SSteven Rostedt (Red Hat) /* pass rec in as regs to have non-NULL val */
2507fef5aeeeSSteven Rostedt (Red Hat) if (hash_contains_ip(ip, op->func_hash))
250879922b80SSteven Rostedt (Red Hat) return op;
250979922b80SSteven Rostedt (Red Hat) } while_for_each_ftrace_op(op);
251079922b80SSteven Rostedt (Red Hat)
251179922b80SSteven Rostedt (Red Hat) return NULL;
251279922b80SSteven Rostedt (Red Hat) }
251379922b80SSteven Rostedt (Red Hat)
2514cbad0fb2SMark Rutland struct ftrace_ops *
ftrace_find_unique_ops(struct dyn_ftrace * rec)2515cbad0fb2SMark Rutland ftrace_find_unique_ops(struct dyn_ftrace *rec)
2516cbad0fb2SMark Rutland {
2517cbad0fb2SMark Rutland struct ftrace_ops *op, *found = NULL;
2518cbad0fb2SMark Rutland unsigned long ip = rec->ip;
2519cbad0fb2SMark Rutland
2520cbad0fb2SMark Rutland do_for_each_ftrace_op(op, ftrace_ops_list) {
2521cbad0fb2SMark Rutland
2522cbad0fb2SMark Rutland if (hash_contains_ip(ip, op->func_hash)) {
2523cbad0fb2SMark Rutland if (found)
2524cbad0fb2SMark Rutland return NULL;
2525cbad0fb2SMark Rutland found = op;
2526cbad0fb2SMark Rutland }
2527cbad0fb2SMark Rutland
2528cbad0fb2SMark Rutland } while_for_each_ftrace_op(op);
2529cbad0fb2SMark Rutland
2530cbad0fb2SMark Rutland return found;
2531cbad0fb2SMark Rutland }
2532cbad0fb2SMark Rutland
2533763e34e7SSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2534763e34e7SSteven Rostedt (VMware) /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2535a12754a8SSteven Rostedt (Google) static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2536763e34e7SSteven Rostedt (VMware) static DEFINE_MUTEX(direct_mutex);
2537a3ad1a7eSSteven Rostedt (VMware) int ftrace_direct_func_count;
2538763e34e7SSteven Rostedt (VMware)
2539763e34e7SSteven Rostedt (VMware) /*
2540763e34e7SSteven Rostedt (VMware) * Search the direct_functions hash to see if the given instruction pointer
2541763e34e7SSteven Rostedt (VMware) * has a direct caller attached to it.
2542763e34e7SSteven Rostedt (VMware) */
ftrace_find_rec_direct(unsigned long ip)2543ff205766SAlexei Starovoitov unsigned long ftrace_find_rec_direct(unsigned long ip)
2544763e34e7SSteven Rostedt (VMware) {
2545763e34e7SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
2546763e34e7SSteven Rostedt (VMware)
2547763e34e7SSteven Rostedt (VMware) entry = __ftrace_lookup_ip(direct_functions, ip);
2548763e34e7SSteven Rostedt (VMware) if (!entry)
2549763e34e7SSteven Rostedt (VMware) return 0;
2550763e34e7SSteven Rostedt (VMware)
2551763e34e7SSteven Rostedt (VMware) return entry->direct;
2552763e34e7SSteven Rostedt (VMware) }
2553763e34e7SSteven Rostedt (VMware)
call_direct_funcs(unsigned long ip,unsigned long pip,struct ftrace_ops * ops,struct ftrace_regs * fregs)2554763e34e7SSteven Rostedt (VMware) static void call_direct_funcs(unsigned long ip, unsigned long pip,
2555d19ad077SSteven Rostedt (VMware) struct ftrace_ops *ops, struct ftrace_regs *fregs)
2556763e34e7SSteven Rostedt (VMware) {
2557dbaccb61SFlorent Revest unsigned long addr = READ_ONCE(ops->direct_call);
2558763e34e7SSteven Rostedt (VMware)
2559763e34e7SSteven Rostedt (VMware) if (!addr)
2560763e34e7SSteven Rostedt (VMware) return;
2561763e34e7SSteven Rostedt (VMware)
25629705bc70SMark Rutland arch_ftrace_set_direct_caller(fregs, addr);
2563763e34e7SSteven Rostedt (VMware) }
2564763e34e7SSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2565763e34e7SSteven Rostedt (VMware)
25667413af1fSSteven Rostedt (Red Hat) /**
25677413af1fSSteven Rostedt (Red Hat) * ftrace_get_addr_new - Get the call address to set to
25687413af1fSSteven Rostedt (Red Hat) * @rec: The ftrace record descriptor
25697413af1fSSteven Rostedt (Red Hat) *
25707413af1fSSteven Rostedt (Red Hat) * If the record has the FTRACE_FL_REGS set, that means that it
25717413af1fSSteven Rostedt (Red Hat) * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
25725c8c206eSRandy Dunlap * is not set, then it wants to convert to the normal callback.
25737413af1fSSteven Rostedt (Red Hat) *
25747413af1fSSteven Rostedt (Red Hat) * Returns the address of the trampoline to set to
25757413af1fSSteven Rostedt (Red Hat) */
ftrace_get_addr_new(struct dyn_ftrace * rec)25767413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
25777413af1fSSteven Rostedt (Red Hat) {
257879922b80SSteven Rostedt (Red Hat) struct ftrace_ops *ops;
2579763e34e7SSteven Rostedt (VMware) unsigned long addr;
2580763e34e7SSteven Rostedt (VMware)
2581763e34e7SSteven Rostedt (VMware) if ((rec->flags & FTRACE_FL_DIRECT) &&
2582763e34e7SSteven Rostedt (VMware) (ftrace_rec_count(rec) == 1)) {
2583ff205766SAlexei Starovoitov addr = ftrace_find_rec_direct(rec->ip);
2584763e34e7SSteven Rostedt (VMware) if (addr)
2585763e34e7SSteven Rostedt (VMware) return addr;
2586763e34e7SSteven Rostedt (VMware) WARN_ON_ONCE(1);
2587763e34e7SSteven Rostedt (VMware) }
258879922b80SSteven Rostedt (Red Hat)
258979922b80SSteven Rostedt (Red Hat) /* Trampolines take precedence over regs */
259079922b80SSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_TRAMP) {
259179922b80SSteven Rostedt (Red Hat) ops = ftrace_find_tramp_ops_new(rec);
259279922b80SSteven Rostedt (Red Hat) if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2593bce0b6c5SSteven Rostedt (Red Hat) pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2594bce0b6c5SSteven Rostedt (Red Hat) (void *)rec->ip, (void *)rec->ip, rec->flags);
259579922b80SSteven Rostedt (Red Hat) /* Ftrace is shutting down, return anything */
259679922b80SSteven Rostedt (Red Hat) return (unsigned long)FTRACE_ADDR;
259779922b80SSteven Rostedt (Red Hat) }
259879922b80SSteven Rostedt (Red Hat) return ops->trampoline;
259979922b80SSteven Rostedt (Red Hat) }
260079922b80SSteven Rostedt (Red Hat)
26017413af1fSSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_REGS)
26027413af1fSSteven Rostedt (Red Hat) return (unsigned long)FTRACE_REGS_ADDR;
26037413af1fSSteven Rostedt (Red Hat) else
26047413af1fSSteven Rostedt (Red Hat) return (unsigned long)FTRACE_ADDR;
26057413af1fSSteven Rostedt (Red Hat) }
26067413af1fSSteven Rostedt (Red Hat)
26077413af1fSSteven Rostedt (Red Hat) /**
26087413af1fSSteven Rostedt (Red Hat) * ftrace_get_addr_curr - Get the call address that is already there
26097413af1fSSteven Rostedt (Red Hat) * @rec: The ftrace record descriptor
26107413af1fSSteven Rostedt (Red Hat) *
26117413af1fSSteven Rostedt (Red Hat) * The FTRACE_FL_REGS_EN is set when the record already points to
26127413af1fSSteven Rostedt (Red Hat) * a function that saves all the regs. Basically the '_EN' version
26137413af1fSSteven Rostedt (Red Hat) * represents the current state of the function.
26147413af1fSSteven Rostedt (Red Hat) *
26157413af1fSSteven Rostedt (Red Hat) * Returns the address of the trampoline that is currently being called
26167413af1fSSteven Rostedt (Red Hat) */
ftrace_get_addr_curr(struct dyn_ftrace * rec)26177413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
26187413af1fSSteven Rostedt (Red Hat) {
261979922b80SSteven Rostedt (Red Hat) struct ftrace_ops *ops;
2620763e34e7SSteven Rostedt (VMware) unsigned long addr;
2621763e34e7SSteven Rostedt (VMware)
2622763e34e7SSteven Rostedt (VMware) /* Direct calls take precedence over trampolines */
2623763e34e7SSteven Rostedt (VMware) if (rec->flags & FTRACE_FL_DIRECT_EN) {
2624ff205766SAlexei Starovoitov addr = ftrace_find_rec_direct(rec->ip);
2625763e34e7SSteven Rostedt (VMware) if (addr)
2626763e34e7SSteven Rostedt (VMware) return addr;
2627763e34e7SSteven Rostedt (VMware) WARN_ON_ONCE(1);
2628763e34e7SSteven Rostedt (VMware) }
262979922b80SSteven Rostedt (Red Hat)
263079922b80SSteven Rostedt (Red Hat) /* Trampolines take precedence over regs */
263179922b80SSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_TRAMP_EN) {
263279922b80SSteven Rostedt (Red Hat) ops = ftrace_find_tramp_ops_curr(rec);
263379922b80SSteven Rostedt (Red Hat) if (FTRACE_WARN_ON(!ops)) {
2634a395d6a7SJoe Perches pr_warn("Bad trampoline accounting at: %p (%pS)\n",
263579922b80SSteven Rostedt (Red Hat) (void *)rec->ip, (void *)rec->ip);
263679922b80SSteven Rostedt (Red Hat) /* Ftrace is shutting down, return anything */
263779922b80SSteven Rostedt (Red Hat) return (unsigned long)FTRACE_ADDR;
263879922b80SSteven Rostedt (Red Hat) }
263979922b80SSteven Rostedt (Red Hat) return ops->trampoline;
264079922b80SSteven Rostedt (Red Hat) }
264179922b80SSteven Rostedt (Red Hat)
26427413af1fSSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_REGS_EN)
26437413af1fSSteven Rostedt (Red Hat) return (unsigned long)FTRACE_REGS_ADDR;
26447413af1fSSteven Rostedt (Red Hat) else
26457413af1fSSteven Rostedt (Red Hat) return (unsigned long)FTRACE_ADDR;
26467413af1fSSteven Rostedt (Red Hat) }
26477413af1fSSteven Rostedt (Red Hat)
/*
 * Patch one record to its new state: decide the action via
 * ftrace_update_record() and apply it with the matching arch call.
 * Returns 0 on success/ignore, otherwise the arch patch error
 * (or -1 for an unknown action).
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	/* Reset so a failure below reports the right bug type. */
	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
2683c88fd863SSteven Rostedt
/*
 * Walk every ftrace record and patch it to its new state.
 * @mod_flags: FTRACE_MODIFY_ENABLE_FL to enable records,
 *	       FTRACE_MODIFY_MAY_SLEEP_FL if cond_resched() is allowed.
 *
 * Weak default; an arch may provide its own implementation.
 * Stops at the first failing record after reporting it via ftrace_bug().
 */
void __weak ftrace_replace_code(int mod_flags)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (skip_record(rec))
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
		if (schedulable)
			cond_resched();
	} while_for_each_ftrace_rec();
}
27103c1720f0SSteven Rostedt
/* Cursor for walking all dyn_ftrace records; see ftrace_rec_iter_*(). */
struct ftrace_rec_iter {
	struct ftrace_page *pg;		/* current page of records */
	int index;			/* index into pg->records[] */
};
2715c88fd863SSteven Rostedt
2716c88fd863SSteven Rostedt /**
27176130722fSSteven Rostedt (VMware) * ftrace_rec_iter_start - start up iterating over traced functions
2718c88fd863SSteven Rostedt *
2719c88fd863SSteven Rostedt * Returns an iterator handle that is used to iterate over all
2720c88fd863SSteven Rostedt * the records that represent address locations where functions
2721c88fd863SSteven Rostedt * are traced.
2722c88fd863SSteven Rostedt *
2723c88fd863SSteven Rostedt * May return NULL if no records are available.
2724c88fd863SSteven Rostedt */
ftrace_rec_iter_start(void)2725c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2726c88fd863SSteven Rostedt {
2727c88fd863SSteven Rostedt /*
2728c88fd863SSteven Rostedt * We only use a single iterator.
2729c88fd863SSteven Rostedt * Protected by the ftrace_lock mutex.
2730c88fd863SSteven Rostedt */
2731c88fd863SSteven Rostedt static struct ftrace_rec_iter ftrace_rec_iter;
2732c88fd863SSteven Rostedt struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2733c88fd863SSteven Rostedt
2734c88fd863SSteven Rostedt iter->pg = ftrace_pages_start;
2735c88fd863SSteven Rostedt iter->index = 0;
2736c88fd863SSteven Rostedt
2737c88fd863SSteven Rostedt /* Could have empty pages */
2738c88fd863SSteven Rostedt while (iter->pg && !iter->pg->index)
2739c88fd863SSteven Rostedt iter->pg = iter->pg->next;
2740c88fd863SSteven Rostedt
2741c88fd863SSteven Rostedt if (!iter->pg)
2742c88fd863SSteven Rostedt return NULL;
2743c88fd863SSteven Rostedt
2744c88fd863SSteven Rostedt return iter;
2745c88fd863SSteven Rostedt }
2746c88fd863SSteven Rostedt
2747c88fd863SSteven Rostedt /**
27486130722fSSteven Rostedt (VMware) * ftrace_rec_iter_next - get the next record to process.
2749c88fd863SSteven Rostedt * @iter: The handle to the iterator.
2750c88fd863SSteven Rostedt *
2751c88fd863SSteven Rostedt * Returns the next iterator after the given iterator @iter.
2752c88fd863SSteven Rostedt */
ftrace_rec_iter_next(struct ftrace_rec_iter * iter)2753c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2754c88fd863SSteven Rostedt {
2755c88fd863SSteven Rostedt iter->index++;
2756c88fd863SSteven Rostedt
2757c88fd863SSteven Rostedt if (iter->index >= iter->pg->index) {
2758c88fd863SSteven Rostedt iter->pg = iter->pg->next;
2759c88fd863SSteven Rostedt iter->index = 0;
2760c88fd863SSteven Rostedt
2761c88fd863SSteven Rostedt /* Could have empty pages */
2762c88fd863SSteven Rostedt while (iter->pg && !iter->pg->index)
2763c88fd863SSteven Rostedt iter->pg = iter->pg->next;
2764c88fd863SSteven Rostedt }
2765c88fd863SSteven Rostedt
2766c88fd863SSteven Rostedt if (!iter->pg)
2767c88fd863SSteven Rostedt return NULL;
2768c88fd863SSteven Rostedt
2769c88fd863SSteven Rostedt return iter;
2770c88fd863SSteven Rostedt }
2771c88fd863SSteven Rostedt
/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 * @iter must be valid (non-NULL, as returned by start/next).
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
2782c88fd863SSteven Rostedt
/*
 * Initialize @rec's call site to a nop via ftrace_init_nop().
 * Returns 1 on success; 0 when ftrace is disabled or the patch failed
 * (failure is reported through ftrace_bug() with FTRACE_BUG_INIT).
 */
static int
ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_init_nop(mod, rec);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}
27993d083395SSteven Rostedt
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 * Weak no-op default.
 */
void __weak ftrace_arch_code_modify_prepare(void)
{
}
2807000ab691SSteven Rostedt
/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 * Weak no-op default.
 */
void __weak ftrace_arch_code_modify_post_process(void)
{
}
2815000ab691SSteven Rostedt
/*
 * Switch the ftrace callback to @func, skipping the (possibly expensive)
 * arch update when @func is already installed. The last-installed
 * function is remembered in a static local.
 */
static int update_ftrace_func(ftrace_func_t func)
{
	static ftrace_func_t save_func;

	if (func != save_func) {
		save_func = func;
		return ftrace_update_ftrace_func(func);
	}

	/* Already set to @func: nothing to do. */
	return 0;
}
2828bd604f3dSSteven Rostedt (Google)
/*
 * Apply a set of FTRACE_* commands (@command is a bitmask) to the
 * kernel's traced call sites. The ordering of the steps below is
 * deliberate; see the comments on each phase.
 */
void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int mod_flags = 0;
	int err = 0;

	if (command & FTRACE_MAY_SLEEP)
		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		/* Order the store of function_trace_op before the switch below. */
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}
28768ed3e2cfSSteven Rostedt
/* stop_machine() callback: apply the requested ftrace modification command. */
static int __ftrace_modify_code(void *data)
{
	ftrace_modify_all_code(*(int *)data);

	return 0;
}
2885c88fd863SSteven Rostedt
/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method,
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
2897c88fd863SSteven Rostedt
/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	/* Default implementation: use the (safest) stop_machine() method. */
	ftrace_run_stop_machine(command);
}
2909c88fd863SSteven Rostedt
/*
 * Run the code-modification @command, bracketed by the arch's
 * prepare/post-process hooks.
 */
static void ftrace_run_update_code(int command)
{
	ftrace_arch_code_modify_prepare();

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ftrace_arch_code_modify_post_process();
}
29243d083395SSteven Rostedt
/*
 * Run @command for @ops while temporarily publishing @old_hash in
 * ops->old_hash (the update code checks the old hashes; see the
 * trampoline logic in ftrace_shutdown()).  The MODIFYING flag is set
 * for the duration of the update.
 */
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	/* Drop the temporary references now that the update is complete. */
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
2936e1effa01SSteven Rostedt (Red Hat)
/* Last trace function installed; used to detect when it has changed. */
static ftrace_func_t saved_ftrace_func;
/* Balance of ftrace_startup() calls minus ftrace_shutdown() calls. */
static int ftrace_start_up;
2939df4fc315SSteven Rostedt
/* Weak stub: archs that allocate ops trampolines override this to free them. */
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}
294312cce594SSteven Rostedt (Red Hat)
/* List of ftrace_ops that have allocated trampolines */
static LIST_HEAD(ftrace_ops_trampoline_list);
2946fc0ea795SAdrian Hunter
/* Publish @ops's trampoline on the list that kallsyms reads (RCU). */
static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
}
2952fc0ea795SAdrian Hunter
/*
 * Remove @ops from the trampoline list.  The synchronize_rcu() ensures
 * that no RCU reader of the list still sees the entry on return.
 */
static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
{
	lockdep_assert_held(&ftrace_lock);
	list_del_rcu(&ops->list);
	synchronize_rcu();
}
2959fc0ea795SAdrian Hunter
2960fc0ea795SAdrian Hunter /*
2961fc0ea795SAdrian Hunter * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2962fc0ea795SAdrian Hunter * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2963fc0ea795SAdrian Hunter * not a module.
2964fc0ea795SAdrian Hunter */
2965fc0ea795SAdrian Hunter #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2966fc0ea795SAdrian Hunter #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2967fc0ea795SAdrian Hunter
/*
 * Tear down a dynamically allocated trampoline for @ops: emit the perf
 * text-poke and ksymbol events, drop the trampoline from kallsyms, and
 * finally let the arch free the trampoline memory itself.
 */
static void ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
	    ops->trampoline) {
		/*
		 * Record the text poke event before the ksymbol unregister
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline,
				     (void *)ops->trampoline,
				     ops->trampoline_size, NULL, 0);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size,
				   true, FTRACE_TRAMPOLINE_SYM);
		/* Remove from kallsyms after the perf events */
		ftrace_remove_trampoline_from_kallsyms(ops);
	}

	arch_ftrace_trampoline_free(ops);
}
2988fc0ea795SAdrian Hunter
/*
 * Enable the updates requested in @command, folding in
 * FTRACE_UPDATE_TRACE_FUNC if the global trace function has changed
 * since the last update.  Does nothing if there is no command left or
 * ftrace is not enabled.
 */
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
30013d083395SSteven Rostedt
/* Like ftrace_startup_enable(), but forces all ops to be re-evaluated. */
static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}
3008e1effa01SSteven Rostedt (Red Hat)
/*
 * Register @ops and enable tracing for it, applying the update
 * @command.  Returns 0 on success, -ENODEV if ftrace is (or becomes)
 * disabled, or a negative error from registration or ipmodify setup,
 * in which case the registration is rolled back.
 */
int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			ftrace_trampoline_free(ops);
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	/*
	 * If ftrace is in an undefined state, we just remove ops from list
	 * to prevent the NULL pointer, instead of totally rolling it back and
	 * free trampoline, because those actions could cause further damage.
	 */
	if (unlikely(ftrace_disabled)) {
		__unregister_ftrace_function(ops);
		return -ENODEV;
	}

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
30623d083395SSteven Rostedt
/*
 * Unregister @ops and disable tracing for it, applying the update
 * @command.  Waits for all in-flight callers of dynamic ops to finish
 * before any trampoline may be freed.  Returns 0 on success, -ENODEV
 * if ftrace is disabled, or the error from unregistration.
 */
int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	/* No code update to perform; skip straight to the RCU waits. */
	if (!command || !ftrace_enabled)
		goto out;

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
				pr_warn(" %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

out:
	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		synchronize_rcu_tasks_rude();

		/*
		 * When the kernel is preemptive, tasks can be preempted
		 * while on a ftrace trampoline. Just scheduling a task on
		 * a CPU is not good enough to flush them. Calling
		 * synchronize_rcu_tasks() will wait for those tasks to
		 * execute and either schedule voluntarily or enter user space.
		 */
		if (IS_ENABLED(CONFIG_PREEMPTION))
			synchronize_rcu_tasks();

		ftrace_trampoline_free(ops);
	}

	return 0;
}
31643d083395SSteven Rostedt
/* Duration of the last ftrace_update_code() pass. */
static u64 ftrace_update_time;
/* Total number of records converted to nops so far. */
unsigned long ftrace_update_tot_cnt;
/* Pages and page groups allocated to hold dyn_ftrace records. */
unsigned long ftrace_number_of_pages;
unsigned long ftrace_number_of_groups;
31693d083395SSteven Rostedt
ops_traces_mod(struct ftrace_ops * ops)31708c4f3c3fSSteven Rostedt (Red Hat) static inline int ops_traces_mod(struct ftrace_ops *ops)
3171f7bc8b61SSteven Rostedt {
31728c4f3c3fSSteven Rostedt (Red Hat) /*
31738c4f3c3fSSteven Rostedt (Red Hat) * Filter_hash being empty will default to trace module.
31748c4f3c3fSSteven Rostedt (Red Hat) * But notrace hash requires a test of individual module functions.
31758c4f3c3fSSteven Rostedt (Red Hat) */
317633b7f99cSSteven Rostedt (Red Hat) return ftrace_hash_empty(ops->func_hash->filter_hash) &&
317733b7f99cSSteven Rostedt (Red Hat) ftrace_hash_empty(ops->func_hash->notrace_hash);
31788c4f3c3fSSteven Rostedt (Red Hat) }
3179f7bc8b61SSteven Rostedt
/*
 * Convert the mcount call sites recorded in @new_pgs to nops and
 * account the time and count.  @mod is the module the records belong
 * to, or NULL for records added at boot.  Returns 0 on success, or -1
 * if ftrace became disabled part way through.
 */
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	bool init_nop = ftrace_need_init_nop();
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	u64 start, stop;
	unsigned long update_cnt = 0;
	unsigned long rec_flags = 0;
	int i;

	start = ftrace_now(raw_smp_processor_id());

	/*
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = rec_flags;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (init_nop && !ftrace_nop_initialize(mod, p))
				break;

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}
323416444a8aSArnaldo Carvalho de Melo
/*
 * Allocate the page(s) that will hold up to @count dyn_ftrace records
 * for @pg, retrying at smaller orders if the allocation fails.
 * Returns the number of records the allocation can hold (which may be
 * less than @count), or a negative errno on failure.
 */
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int pages;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	/* We want to fill as much as possible, with no empty pages */
	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
	order = fls(pages) - 1;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order--;
		goto again;
	}

	ftrace_number_of_pages += 1 << order;
	ftrace_number_of_groups++;

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->order = order;

	if (cnt > count)
		cnt = count;

	return cnt;
}
3270a7900875SSteven Rostedt
ftrace_free_pages(struct ftrace_page * pages)327126efd79cSZheng Yejian static void ftrace_free_pages(struct ftrace_page *pages)
327226efd79cSZheng Yejian {
327326efd79cSZheng Yejian struct ftrace_page *pg = pages;
327426efd79cSZheng Yejian
327526efd79cSZheng Yejian while (pg) {
327626efd79cSZheng Yejian if (pg->records) {
327726efd79cSZheng Yejian free_pages((unsigned long)pg->records, pg->order);
327826efd79cSZheng Yejian ftrace_number_of_pages -= 1 << pg->order;
327926efd79cSZheng Yejian }
328026efd79cSZheng Yejian pages = pg->next;
328126efd79cSZheng Yejian kfree(pg);
328226efd79cSZheng Yejian pg = pages;
328326efd79cSZheng Yejian ftrace_number_of_groups--;
328426efd79cSZheng Yejian }
328526efd79cSZheng Yejian }
328626efd79cSZheng Yejian
/*
 * Allocate a linked list of ftrace_page groups with room for
 * @num_to_init records.  Returns the head of the list, or NULL on
 * allocation failure or when @num_to_init is zero.
 */
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	ftrace_free_pages(start_pg);
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}
3329a7900875SSteven Rostedt
33305072c59fSSteven Rostedt #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
33315072c59fSSteven Rostedt
/*
 * Iterator state for the ftrace tracefs seq_file readers (function
 * records, function probes and module commands).
 */
struct ftrace_iterator {
	loff_t pos;		/* current overall seq_file position */
	loff_t func_pos;	/* seq position past the function records (used by t_func_* — not in view) */
	loff_t mod_pos;		/* seq position where the probe section begins */
	struct ftrace_page *pg;	/* current page of dyn_ftrace records */
	struct dyn_ftrace *func;	/* current function record */
	struct ftrace_func_probe *probe;	/* current probe on tr->func_probes */
	struct ftrace_func_entry *probe_entry;	/* current entry in the probe's filter hash */
	struct trace_parser parser;	/* parser state — presumably for writes to the file; verify against callers */
	struct ftrace_hash *hash;	/* hash being iterated/modified — TODO confirm from open/write paths */
	struct ftrace_ops *ops;	/* ops this file belongs to (its private is the trace_array) */
	struct trace_array *tr;	/* trace instance */
	struct list_head *mod_list;	/* current module-command list entry */
	int pidx;	/* current bucket index within the probe's hash */
	int idx;	/* index within pg — used by the record iterator (not in view) */
	unsigned flags;	/* FTRACE_ITER_* state flags */
};
33495072c59fSSteven Rostedt
/*
 * Advance the iterator to the next function-probe hash entry.  Walks
 * every probe on tr->func_probes and, within each probe, every bucket
 * of its filter hash.  Returns the iterator, or NULL when there are no
 * more entries.
 */
static void *
t_probe_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct trace_array *tr = iter->ops->private;
	struct list_head *func_probes;
	struct ftrace_hash *hash;
	struct list_head *next;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;
	int size;

	(*pos)++;
	iter->pos = *pos;

	if (!tr)
		return NULL;

	func_probes = &tr->func_probes;
	if (list_empty(func_probes))
		return NULL;

	/* First call: start at the first probe in the list. */
	if (!iter->probe) {
		next = func_probes->next;
		iter->probe = list_entry(next, struct ftrace_func_probe, list);
	}

	if (iter->probe_entry)
		hnd = &iter->probe_entry->hlist;

	hash = iter->probe->ops.func_hash->filter_hash;

	/*
	 * A probe being registered may temporarily have an empty hash
	 * and it's at the end of the func_probes list.
	 */
	if (!hash || hash == EMPTY_HASH)
		return NULL;

	size = 1 << hash->size_bits;

 retry:
	/* Past the last bucket: move on to the next probe in the list. */
	if (iter->pidx >= size) {
		if (iter->probe->list.next == func_probes)
			return NULL;
		next = iter->probe->list.next;
		iter->probe = list_entry(next, struct ftrace_func_probe, list);
		hash = iter->probe->ops.func_hash->filter_hash;
		size = 1 << hash->size_bits;
		iter->pidx = 0;
	}

	hhd = &hash->buckets[iter->pidx];

	if (hlist_empty(hhd)) {
		iter->pidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->pidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);

	return iter;
}
34278fc0c701SSteven Rostedt
/*
 * Position the iterator at the probe entry corresponding to @pos by
 * restarting from the first probe and stepping forward.  Returns NULL
 * when probes are not being iterated, @pos is before the probe
 * section, or there is no entry at @pos.
 */
static void *t_probe_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
		return NULL;

	if (iter->mod_pos > *pos)
		return NULL;

	/* Restart the probe walk from the beginning and advance to *pos. */
	iter->probe = NULL;
	iter->probe_entry = NULL;
	iter->pidx = 0;
	for (l = 0; l <= (*pos - iter->mod_pos); ) {
		p = t_probe_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_PROBE;

	return iter;
}
34568fc0c701SSteven Rostedt
/*
 * Print one function-probe entry for the seq_file.  Uses the probe's own
 * ->print() callback when one is provided, otherwise emits the default
 * "ip:handler" form.  Returns 0 on success, -EIO if the iterator state
 * is inconsistent (no probe selected).
 */
34574aeb6967SSteven Rostedt static int
t_probe_show(struct seq_file * m,struct ftrace_iterator * iter)3458eee8ded1SSteven Rostedt (VMware) t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
34598fc0c701SSteven Rostedt {
3460eee8ded1SSteven Rostedt (VMware) struct ftrace_func_entry *probe_entry;
34617b60f3d8SSteven Rostedt (VMware) struct ftrace_probe_ops *probe_ops;
34627b60f3d8SSteven Rostedt (VMware) struct ftrace_func_probe *probe;
34638fc0c701SSteven Rostedt
3464eee8ded1SSteven Rostedt (VMware) probe = iter->probe;
3465eee8ded1SSteven Rostedt (VMware) probe_entry = iter->probe_entry;
3466eee8ded1SSteven Rostedt (VMware)
3467eee8ded1SSteven Rostedt (VMware) if (WARN_ON_ONCE(!probe || !probe_entry))
34684aeb6967SSteven Rostedt return -EIO;
34698fc0c701SSteven Rostedt
34707b60f3d8SSteven Rostedt (VMware) probe_ops = probe->probe_ops;
3471809dcf29SSteven Rostedt
34727b60f3d8SSteven Rostedt (VMware) if (probe_ops->print)
34736e444319SSteven Rostedt (VMware) return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
34748fc0c701SSteven Rostedt
34757b60f3d8SSteven Rostedt (VMware) seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
34767b60f3d8SSteven Rostedt (VMware) (void *)probe_ops->func);
34778fc0c701SSteven Rostedt
34788fc0c701SSteven Rostedt return 0;
34798fc0c701SSteven Rostedt }
34808fc0c701SSteven Rostedt
/*
 * Advance the iterator to the next module-filter entry (":mod:" commands
 * saved for modules not yet loaded).  Clears FTRACE_ITER_MOD and returns
 * NULL when the walk reaches the list head of either tr->mod_trace or
 * tr->mod_notrace, i.e. the end of the module list.
 */
34818fc0c701SSteven Rostedt static void *
t_mod_next(struct seq_file * m,loff_t * pos)34825985ea8bSSteven Rostedt (VMware) t_mod_next(struct seq_file *m, loff_t *pos)
34835985ea8bSSteven Rostedt (VMware) {
34845985ea8bSSteven Rostedt (VMware) struct ftrace_iterator *iter = m->private;
34855985ea8bSSteven Rostedt (VMware) struct trace_array *tr = iter->tr;
34865985ea8bSSteven Rostedt (VMware)
34875985ea8bSSteven Rostedt (VMware) (*pos)++;
34885985ea8bSSteven Rostedt (VMware) iter->pos = *pos;
34895985ea8bSSteven Rostedt (VMware)
34905985ea8bSSteven Rostedt (VMware) iter->mod_list = iter->mod_list->next;
34915985ea8bSSteven Rostedt (VMware)
34925985ea8bSSteven Rostedt (VMware) if (iter->mod_list == &tr->mod_trace ||
34935985ea8bSSteven Rostedt (VMware) iter->mod_list == &tr->mod_notrace) {
34945985ea8bSSteven Rostedt (VMware) iter->flags &= ~FTRACE_ITER_MOD;
34955985ea8bSSteven Rostedt (VMware) return NULL;
34965985ea8bSSteven Rostedt (VMware) }
34975985ea8bSSteven Rostedt (VMware)
34985985ea8bSSteven Rostedt (VMware) iter->mod_pos = *pos;
34995985ea8bSSteven Rostedt (VMware)
35005985ea8bSSteven Rostedt (VMware) return iter;
35015985ea8bSSteven Rostedt (VMware) }
35025985ea8bSSteven Rostedt (VMware)
/*
 * Position the iterator at the module-filter entry for *pos.  Module
 * filters are displayed after the plain function entries (func_pos), and
 * when they are exhausted this chains on to t_probe_start() so the probe
 * entries follow.  Returns the iterator, the result of t_probe_start(),
 * or NULL.  Caller holds ftrace_lock.
 */
t_mod_start(struct seq_file * m,loff_t * pos)35035985ea8bSSteven Rostedt (VMware) static void *t_mod_start(struct seq_file *m, loff_t *pos)
35045985ea8bSSteven Rostedt (VMware) {
35055985ea8bSSteven Rostedt (VMware) struct ftrace_iterator *iter = m->private;
35065985ea8bSSteven Rostedt (VMware) void *p = NULL;
35075985ea8bSSteven Rostedt (VMware) loff_t l;
35085985ea8bSSteven Rostedt (VMware)
35095985ea8bSSteven Rostedt (VMware) if (iter->func_pos > *pos)
35105985ea8bSSteven Rostedt (VMware) return NULL;
35115985ea8bSSteven Rostedt (VMware)
35125985ea8bSSteven Rostedt (VMware) iter->mod_pos = iter->func_pos;
35135985ea8bSSteven Rostedt (VMware)
35145985ea8bSSteven Rostedt (VMware) /* probes are only available if tr is set */
35155985ea8bSSteven Rostedt (VMware) if (!iter->tr)
35165985ea8bSSteven Rostedt (VMware) return NULL;
35175985ea8bSSteven Rostedt (VMware)
35185985ea8bSSteven Rostedt (VMware) for (l = 0; l <= (*pos - iter->func_pos); ) {
35195985ea8bSSteven Rostedt (VMware) p = t_mod_next(m, &l);
35205985ea8bSSteven Rostedt (VMware) if (!p)
35215985ea8bSSteven Rostedt (VMware) break;
35225985ea8bSSteven Rostedt (VMware) }
35235985ea8bSSteven Rostedt (VMware) if (!p) {
35245985ea8bSSteven Rostedt (VMware) iter->flags &= ~FTRACE_ITER_MOD;
35255985ea8bSSteven Rostedt (VMware) return t_probe_start(m, pos);
35265985ea8bSSteven Rostedt (VMware) }
35275985ea8bSSteven Rostedt (VMware)
35285985ea8bSSteven Rostedt (VMware) /* Only set this if we have an item */
35295985ea8bSSteven Rostedt (VMware) iter->flags |= FTRACE_ITER_MOD;
35305985ea8bSSteven Rostedt (VMware)
35315985ea8bSSteven Rostedt (VMware) return iter;
35325985ea8bSSteven Rostedt (VMware) }
35335985ea8bSSteven Rostedt (VMware)
/*
 * Print one saved module filter in "func:mod:module" form (or "*:mod:module"
 * when no function glob was given).  Returns 0 on success, -EIO if the
 * iterator is not actually positioned on a module entry.
 */
35345985ea8bSSteven Rostedt (VMware) static int
t_mod_show(struct seq_file * m,struct ftrace_iterator * iter)35355985ea8bSSteven Rostedt (VMware) t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
35365985ea8bSSteven Rostedt (VMware) {
35375985ea8bSSteven Rostedt (VMware) struct ftrace_mod_load *ftrace_mod;
35385985ea8bSSteven Rostedt (VMware) struct trace_array *tr = iter->tr;
35395985ea8bSSteven Rostedt (VMware)
35405985ea8bSSteven Rostedt (VMware) if (WARN_ON_ONCE(!iter->mod_list) ||
35415985ea8bSSteven Rostedt (VMware) iter->mod_list == &tr->mod_trace ||
35425985ea8bSSteven Rostedt (VMware) iter->mod_list == &tr->mod_notrace)
35435985ea8bSSteven Rostedt (VMware) return -EIO;
35445985ea8bSSteven Rostedt (VMware)
35455985ea8bSSteven Rostedt (VMware) ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
35465985ea8bSSteven Rostedt (VMware)
35475985ea8bSSteven Rostedt (VMware) if (ftrace_mod->func)
35485985ea8bSSteven Rostedt (VMware) seq_printf(m, "%s", ftrace_mod->func);
35495985ea8bSSteven Rostedt (VMware) else
35505985ea8bSSteven Rostedt (VMware) seq_putc(m, '*');
35515985ea8bSSteven Rostedt (VMware)
35525985ea8bSSteven Rostedt (VMware) seq_printf(m, ":mod:%s\n", ftrace_mod->module);
35535985ea8bSSteven Rostedt (VMware)
35545985ea8bSSteven Rostedt (VMware) return 0;
35555985ea8bSSteven Rostedt (VMware) }
35565985ea8bSSteven Rostedt (VMware)
/*
 * Advance the iterator to the next dyn_ftrace record that matches the
 * iterator's view: filtered/notraced records must be in iter->hash, and
 * the "enabled"/"touched" files skip records without the corresponding
 * flag.  Walks the ftrace_pages list page by page.  Returns the iterator
 * with iter->func set, or NULL at end of records.
 */
35575985ea8bSSteven Rostedt (VMware) static void *
t_func_next(struct seq_file * m,loff_t * pos)35585bd84629SSteven Rostedt (VMware) t_func_next(struct seq_file *m, loff_t *pos)
35595072c59fSSteven Rostedt {
35605072c59fSSteven Rostedt struct ftrace_iterator *iter = m->private;
35615072c59fSSteven Rostedt struct dyn_ftrace *rec = NULL;
35625072c59fSSteven Rostedt
35635072c59fSSteven Rostedt (*pos)++;
35640c75a3edSSteven Rostedt
35655072c59fSSteven Rostedt  retry:
35665072c59fSSteven Rostedt if (iter->idx >= iter->pg->index) {
35675072c59fSSteven Rostedt if (iter->pg->next) {
35685072c59fSSteven Rostedt iter->pg = iter->pg->next;
35695072c59fSSteven Rostedt iter->idx = 0;
35705072c59fSSteven Rostedt goto retry;
35715072c59fSSteven Rostedt }
35725072c59fSSteven Rostedt } else {
35735072c59fSSteven Rostedt rec = &iter->pg->records[iter->idx++];
/* Skip records the current view should not display */
3574c20489daSSteven Rostedt (VMware) if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3575c20489daSSteven Rostedt (VMware) !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3576647bcd03SSteven Rostedt
3577647bcd03SSteven Rostedt ((iter->flags & FTRACE_ITER_ENABLED) &&
3578e11b521aSSteven Rostedt (Google) !(rec->flags & FTRACE_FL_ENABLED)) ||
3579e11b521aSSteven Rostedt (Google)
3580e11b521aSSteven Rostedt (Google) ((iter->flags & FTRACE_ITER_TOUCHED) &&
3581e11b521aSSteven Rostedt (Google) !(rec->flags & FTRACE_FL_TOUCHED))) {
3582647bcd03SSteven Rostedt
35835072c59fSSteven Rostedt rec = NULL;
35845072c59fSSteven Rostedt goto retry;
35855072c59fSSteven Rostedt }
35865072c59fSSteven Rostedt }
35875072c59fSSteven Rostedt
35884aeb6967SSteven Rostedt if (!rec)
35895bd84629SSteven Rostedt (VMware) return NULL;
35904aeb6967SSteven Rostedt
35915bd84629SSteven Rostedt (VMware) iter->pos = iter->func_pos = *pos;
35924aeb6967SSteven Rostedt iter->func = rec;
35934aeb6967SSteven Rostedt
35944aeb6967SSteven Rostedt return iter;
35955072c59fSSteven Rostedt }
35965072c59fSSteven Rostedt
/*
 * seq_file ->next(): dispatch to the sub-iterator for the section we are
 * currently in (probes, module filters, or plain function records), and
 * fall through from functions to module filters/probes when the function
 * records run out.
 */
35975bd84629SSteven Rostedt (VMware) static void *
t_next(struct seq_file * m,void * v,loff_t * pos)35985bd84629SSteven Rostedt (VMware) t_next(struct seq_file *m, void *v, loff_t *pos)
35995bd84629SSteven Rostedt (VMware) {
36005bd84629SSteven Rostedt (VMware) struct ftrace_iterator *iter = m->private;
36015985ea8bSSteven Rostedt (VMware) loff_t l = *pos; /* t_probe_start() must use original pos */
36025bd84629SSteven Rostedt (VMware) void *ret;
36035bd84629SSteven Rostedt (VMware)
36045bd84629SSteven Rostedt (VMware) if (unlikely(ftrace_disabled))
36055bd84629SSteven Rostedt (VMware) return NULL;
36065bd84629SSteven Rostedt (VMware)
3607eee8ded1SSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_PROBE)
3608eee8ded1SSteven Rostedt (VMware) return t_probe_next(m, pos);
36095bd84629SSteven Rostedt (VMware)
36105985ea8bSSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_MOD)
36115985ea8bSSteven Rostedt (VMware) return t_mod_next(m, pos);
36125985ea8bSSteven Rostedt (VMware)
36135bd84629SSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_PRINTALL) {
3614eee8ded1SSteven Rostedt (VMware) /* next must increment pos, and t_probe_start does not */
36155bd84629SSteven Rostedt (VMware) (*pos)++;
36165985ea8bSSteven Rostedt (VMware) return t_mod_start(m, &l);
36175bd84629SSteven Rostedt (VMware) }
36185bd84629SSteven Rostedt (VMware)
36195bd84629SSteven Rostedt (VMware) ret = t_func_next(m, pos);
36205bd84629SSteven Rostedt (VMware)
36215bd84629SSteven Rostedt (VMware) if (!ret)
36225985ea8bSSteven Rostedt (VMware) return t_mod_start(m, &l);
36235bd84629SSteven Rostedt (VMware)
36245bd84629SSteven Rostedt (VMware) return ret;
36255bd84629SSteven Rostedt (VMware) }
36265bd84629SSteven Rostedt (VMware)
/*
 * Reset iterator position state so a seek/pread restarts the walk from
 * the beginning of the function records.
 */
reset_iter_read(struct ftrace_iterator * iter)362798c4fd04SSteven Rostedt static void reset_iter_read(struct ftrace_iterator *iter)
362898c4fd04SSteven Rostedt {
362998c4fd04SSteven Rostedt iter->pos = 0;
363098c4fd04SSteven Rostedt iter->func_pos = 0;
36315985ea8bSSteven Rostedt (VMware) iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
36325072c59fSSteven Rostedt }
36335072c59fSSteven Rostedt
/*
 * seq_file ->start(): take ftrace_lock (released in t_stop()) and position
 * the iterator at *pos.  Handles the short-cut case where the filter hash
 * is empty (one synthetic "all functions enabled" line), resuming inside
 * the module-filter section, and re-walking the function records from the
 * first page since they may have changed while the lock was dropped.
 */
t_start(struct seq_file * m,loff_t * pos)36345072c59fSSteven Rostedt static void *t_start(struct seq_file *m, loff_t *pos)
36355072c59fSSteven Rostedt {
36365072c59fSSteven Rostedt struct ftrace_iterator *iter = m->private;
36375072c59fSSteven Rostedt void *p = NULL;
3638694ce0a5SLi Zefan loff_t l;
36395072c59fSSteven Rostedt
36408fc0c701SSteven Rostedt mutex_lock(&ftrace_lock);
364145a4a237SSteven Rostedt
/* t_stop() still runs on a NULL return, so the lock is released there */
364245a4a237SSteven Rostedt if (unlikely(ftrace_disabled))
364345a4a237SSteven Rostedt return NULL;
364445a4a237SSteven Rostedt
36450c75a3edSSteven Rostedt /*
364698c4fd04SSteven Rostedt * If an lseek was done, then reset and start from beginning.
364798c4fd04SSteven Rostedt */
364898c4fd04SSteven Rostedt if (*pos < iter->pos)
364998c4fd04SSteven Rostedt reset_iter_read(iter);
365098c4fd04SSteven Rostedt
365198c4fd04SSteven Rostedt /*
36520c75a3edSSteven Rostedt * For set_ftrace_filter reading, if we have the filter
36530c75a3edSSteven Rostedt * off, we can short cut and just print out that all
36540c75a3edSSteven Rostedt * functions are enabled.
36550c75a3edSSteven Rostedt */
3656c20489daSSteven Rostedt (VMware) if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3657c20489daSSteven Rostedt (VMware) ftrace_hash_empty(iter->hash)) {
365843ff926aSSteven Rostedt (VMware) iter->func_pos = 1; /* Account for the message */
36590c75a3edSSteven Rostedt if (*pos > 0)
36605985ea8bSSteven Rostedt (VMware) return t_mod_start(m, pos);
36610c75a3edSSteven Rostedt iter->flags |= FTRACE_ITER_PRINTALL;
3662df091625SChris Wright /* reset in case of seek/pread */
3663eee8ded1SSteven Rostedt (VMware) iter->flags &= ~FTRACE_ITER_PROBE;
36640c75a3edSSteven Rostedt return iter;
36650c75a3edSSteven Rostedt }
36660c75a3edSSteven Rostedt
36675985ea8bSSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_MOD)
36685985ea8bSSteven Rostedt (VMware) return t_mod_start(m, pos);
36698fc0c701SSteven Rostedt
367098c4fd04SSteven Rostedt /*
367198c4fd04SSteven Rostedt * Unfortunately, we need to restart at ftrace_pages_start
367298c4fd04SSteven Rostedt * every time we let go of the ftrace_mutex. This is because
367398c4fd04SSteven Rostedt * those pointers can change without the lock.
367498c4fd04SSteven Rostedt */
3675694ce0a5SLi Zefan iter->pg = ftrace_pages_start;
3676694ce0a5SLi Zefan iter->idx = 0;
3677694ce0a5SLi Zefan for (l = 0; l <= *pos; ) {
36785bd84629SSteven Rostedt (VMware) p = t_func_next(m, &l);
3679694ce0a5SLi Zefan if (!p)
3680694ce0a5SLi Zefan break;
368150cdaf08SLiming Wang }
36825821e1b7Swalimis
368369a3083cSSteven Rostedt if (!p)
36845985ea8bSSteven Rostedt (VMware) return t_mod_start(m, pos);
36858fc0c701SSteven Rostedt
36864aeb6967SSteven Rostedt return iter;
36875072c59fSSteven Rostedt }
36885072c59fSSteven Rostedt
/* seq_file ->stop(): release ftrace_lock taken in t_start(). */
t_stop(struct seq_file * m,void * p)36895072c59fSSteven Rostedt static void t_stop(struct seq_file *m, void *p)
36905072c59fSSteven Rostedt {
36918fc0c701SSteven Rostedt mutex_unlock(&ftrace_lock);
36925072c59fSSteven Rostedt }
36935072c59fSSteven Rostedt
/*
 * Arch hook: return the function a trampoline calls for @rec, for display
 * purposes.  This weak default returns NULL (no extra info); architectures
 * may override it.
 */
369415d5b02cSSteven Rostedt (Red Hat) void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops * ops,struct dyn_ftrace * rec)369515d5b02cSSteven Rostedt (Red Hat) arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
369615d5b02cSSteven Rostedt (Red Hat) {
369715d5b02cSSteven Rostedt (Red Hat) return NULL;
369815d5b02cSSteven Rostedt (Red Hat) }
369915d5b02cSSteven Rostedt (Red Hat)
/*
 * Append " ->func" to the seq_file output if the architecture reports a
 * trampoline target for this record; prints nothing otherwise.
 */
add_trampoline_func(struct seq_file * m,struct ftrace_ops * ops,struct dyn_ftrace * rec)370015d5b02cSSteven Rostedt (Red Hat) static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
370115d5b02cSSteven Rostedt (Red Hat) struct dyn_ftrace *rec)
370215d5b02cSSteven Rostedt (Red Hat) {
370315d5b02cSSteven Rostedt (Red Hat) void *ptr;
370415d5b02cSSteven Rostedt (Red Hat)
370515d5b02cSSteven Rostedt (Red Hat) ptr = arch_ftrace_trampoline_func(ops, rec);
370615d5b02cSSteven Rostedt (Red Hat) if (ptr)
370715d5b02cSSteven Rostedt (Red Hat) seq_printf(m, " ->%pS", ptr);
370815d5b02cSSteven Rostedt (Red Hat) }
370915d5b02cSSteven Rostedt (Red Hat)
3710b39181f7SSteven Rostedt (Google) #ifdef FTRACE_MCOUNT_MAX_OFFSET
3711b39181f7SSteven Rostedt (Google) /*
3712b39181f7SSteven Rostedt (Google) * Weak functions can still have an mcount/fentry that is saved in
3713b39181f7SSteven Rostedt (Google) * the __mcount_loc section. These can be detected by having a
3714b39181f7SSteven Rostedt (Google) * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the
3715b39181f7SSteven Rostedt (Google) * symbol found by kallsyms is not the function that the mcount/fentry
3716b39181f7SSteven Rostedt (Google) * is part of. The offset is much greater in these cases.
3717b39181f7SSteven Rostedt (Google) *
3718b39181f7SSteven Rostedt (Google) * Test the record to make sure that the ip points to a valid kallsyms
3719b39181f7SSteven Rostedt (Google) * and if not, mark it disabled.
3720b39181f7SSteven Rostedt (Google) */
/*
 * Validate one mcount/fentry record against kallsyms.  A record whose
 * offset from its nearest symbol exceeds FTRACE_MCOUNT_MAX_OFFSET belongs
 * to an overridden weak function; mark it FTRACE_FL_DISABLED and return 0.
 * Returns 1 when the record is valid.
 */
test_for_valid_rec(struct dyn_ftrace * rec)3721b39181f7SSteven Rostedt (Google) static int test_for_valid_rec(struct dyn_ftrace *rec)
3722b39181f7SSteven Rostedt (Google) {
3723b39181f7SSteven Rostedt (Google) char str[KSYM_SYMBOL_LEN];
3724b39181f7SSteven Rostedt (Google) unsigned long offset;
3725b39181f7SSteven Rostedt (Google) const char *ret;
3726b39181f7SSteven Rostedt (Google)
3727b39181f7SSteven Rostedt (Google) ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3728b39181f7SSteven Rostedt (Google)
3729b39181f7SSteven Rostedt (Google) /* Weak functions can cause invalid addresses */
3730b39181f7SSteven Rostedt (Google) if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3731b39181f7SSteven Rostedt (Google) rec->flags |= FTRACE_FL_DISABLED;
3732b39181f7SSteven Rostedt (Google) return 0;
3733b39181f7SSteven Rostedt (Google) }
3734b39181f7SSteven Rostedt (Google) return 1;
3735b39181f7SSteven Rostedt (Google) }
3736b39181f7SSteven Rostedt (Google)
/* Boot-time work item used to validate all records off the init path. */
3737b39181f7SSteven Rostedt (Google) static struct workqueue_struct *ftrace_check_wq __initdata;
3738b39181f7SSteven Rostedt (Google) static struct work_struct ftrace_check_work __initdata;
3739b39181f7SSteven Rostedt (Google)
3740b39181f7SSteven Rostedt (Google) /*
3741b39181f7SSteven Rostedt (Google) * Scan all the mcount/fentry entries to make sure they are valid.
3742b39181f7SSteven Rostedt (Google) */
ftrace_check_work_func(struct work_struct * work)3743b39181f7SSteven Rostedt (Google) static __init void ftrace_check_work_func(struct work_struct *work)
3744b39181f7SSteven Rostedt (Google) {
3745b39181f7SSteven Rostedt (Google) struct ftrace_page *pg;
3746b39181f7SSteven Rostedt (Google) struct dyn_ftrace *rec;
3747b39181f7SSteven Rostedt (Google)
/* ftrace_lock serializes against anything else walking the records */
3748b39181f7SSteven Rostedt (Google) mutex_lock(&ftrace_lock);
3749b39181f7SSteven Rostedt (Google) do_for_each_ftrace_rec(pg, rec) {
3750b39181f7SSteven Rostedt (Google) test_for_valid_rec(rec);
3751b39181f7SSteven Rostedt (Google) } while_for_each_ftrace_rec();
3752b39181f7SSteven Rostedt (Google) mutex_unlock(&ftrace_lock);
3753b39181f7SSteven Rostedt (Google) }
3754b39181f7SSteven Rostedt (Google)
ftrace_check_for_weak_functions(void)3755b39181f7SSteven Rostedt (Google) static int __init ftrace_check_for_weak_functions(void)
3756b39181f7SSteven Rostedt (Google) {
3757b39181f7SSteven Rostedt (Google) INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3758b39181f7SSteven Rostedt (Google)
3759b39181f7SSteven Rostedt (Google) ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3760b39181f7SSteven Rostedt (Google)
3761b39181f7SSteven Rostedt (Google) queue_work(ftrace_check_wq, &ftrace_check_work);
3762b39181f7SSteven Rostedt (Google) return 0;
3763b39181f7SSteven Rostedt (Google) }
3764b39181f7SSteven Rostedt (Google)
/*
 * Late initcall: flush and tear down the validation workqueue so the
 * __initdata work/workqueue objects are not referenced after init memory
 * is freed.  destroy_workqueue() drains pending work before freeing.
 */
ftrace_check_sync(void)3765b39181f7SSteven Rostedt (Google) static int __init ftrace_check_sync(void)
3766b39181f7SSteven Rostedt (Google) {
3767b39181f7SSteven Rostedt (Google) /* Make sure the ftrace_check updates are finished */
3768b39181f7SSteven Rostedt (Google) if (ftrace_check_wq)
3769b39181f7SSteven Rostedt (Google) destroy_workqueue(ftrace_check_wq);
3770b39181f7SSteven Rostedt (Google) return 0;
3771b39181f7SSteven Rostedt (Google) }
3772b39181f7SSteven Rostedt (Google)
3773b39181f7SSteven Rostedt (Google) late_initcall_sync(ftrace_check_sync);
3774b39181f7SSteven Rostedt (Google) subsys_initcall(ftrace_check_for_weak_functions);
3775b39181f7SSteven Rostedt (Google)
/*
 * Print the symbol (and "[module]" when kallsyms_lookup() fills in a
 * module name) for @ip.  Addresses that resolve past
 * FTRACE_MCOUNT_MAX_OFFSET are overridden weak functions and are printed
 * as FTRACE_INVALID_FUNCTION_<offset>.  Returns 0 for a valid symbol,
 * -1 for an invalid one.
 */
print_rec(struct seq_file * m,unsigned long ip)3776b39181f7SSteven Rostedt (Google) static int print_rec(struct seq_file *m, unsigned long ip)
3777b39181f7SSteven Rostedt (Google) {
3778b39181f7SSteven Rostedt (Google) unsigned long offset;
3779b39181f7SSteven Rostedt (Google) char str[KSYM_SYMBOL_LEN];
3780b39181f7SSteven Rostedt (Google) char *modname;
3781b39181f7SSteven Rostedt (Google) const char *ret;
3782b39181f7SSteven Rostedt (Google)
3783b39181f7SSteven Rostedt (Google) ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3784b39181f7SSteven Rostedt (Google) /* Weak functions can cause invalid addresses */
3785b39181f7SSteven Rostedt (Google) if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3786b39181f7SSteven Rostedt (Google) snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3787b39181f7SSteven Rostedt (Google) FTRACE_INVALID_FUNCTION, offset);
3788b39181f7SSteven Rostedt (Google) ret = NULL;
3789b39181f7SSteven Rostedt (Google) }
3790b39181f7SSteven Rostedt (Google)
3791b39181f7SSteven Rostedt (Google) seq_puts(m, str);
3792b39181f7SSteven Rostedt (Google) if (modname)
3793b39181f7SSteven Rostedt (Google) seq_printf(m, " [%s]", modname);
3794b39181f7SSteven Rostedt (Google) return ret == NULL ? -1 : 0;
3795b39181f7SSteven Rostedt (Google) }
3796b39181f7SSteven Rostedt (Google) #else
/* !FTRACE_MCOUNT_MAX_OFFSET: every record is considered valid. */
test_for_valid_rec(struct dyn_ftrace * rec)3797b39181f7SSteven Rostedt (Google) static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3798b39181f7SSteven Rostedt (Google) {
3799b39181f7SSteven Rostedt (Google) return 1;
3800b39181f7SSteven Rostedt (Google) }
3801b39181f7SSteven Rostedt (Google)
/* !FTRACE_MCOUNT_MAX_OFFSET: print the symbol directly; always valid. */
print_rec(struct seq_file * m,unsigned long ip)3802b39181f7SSteven Rostedt (Google) static inline int print_rec(struct seq_file *m, unsigned long ip)
3803b39181f7SSteven Rostedt (Google) {
3804b39181f7SSteven Rostedt (Google) seq_printf(m, "%ps", (void *)ip);
3805b39181f7SSteven Rostedt (Google) return 0;
3806b39181f7SSteven Rostedt (Google) }
3807b39181f7SSteven Rostedt (Google) #endif
3808b39181f7SSteven Rostedt (Google)
/*
 * seq_file ->show(): emit one line of output for whatever the iterator is
 * currently positioned on — a probe entry, a module filter, the synthetic
 * "all functions enabled" message, or a dyn_ftrace record.  For the
 * "enabled"/"touched" files, also print the record's ref count, flag
 * letters (R/I/D/O/M), trampoline chain, unique ops and direct caller.
 */
t_show(struct seq_file * m,void * v)38095072c59fSSteven Rostedt static int t_show(struct seq_file *m, void *v)
38105072c59fSSteven Rostedt {
38110c75a3edSSteven Rostedt struct ftrace_iterator *iter = m->private;
38124aeb6967SSteven Rostedt struct dyn_ftrace *rec;
38135072c59fSSteven Rostedt
3814eee8ded1SSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_PROBE)
3815eee8ded1SSteven Rostedt (VMware) return t_probe_show(m, iter);
38168fc0c701SSteven Rostedt
38175985ea8bSSteven Rostedt (VMware) if (iter->flags & FTRACE_ITER_MOD)
38185985ea8bSSteven Rostedt (VMware) return t_mod_show(m, iter);
38195985ea8bSSteven Rostedt (VMware)
38200c75a3edSSteven Rostedt if (iter->flags & FTRACE_ITER_PRINTALL) {
38218c006cf7SNamhyung Kim if (iter->flags & FTRACE_ITER_NOTRACE)
3822fa6f0cc7SRasmus Villemoes seq_puts(m, "#### no functions disabled ####\n");
38238c006cf7SNamhyung Kim else
3824fa6f0cc7SRasmus Villemoes seq_puts(m, "#### all functions enabled ####\n");
38250c75a3edSSteven Rostedt return 0;
38260c75a3edSSteven Rostedt }
38270c75a3edSSteven Rostedt
38284aeb6967SSteven Rostedt rec = iter->func;
38294aeb6967SSteven Rostedt
38305072c59fSSteven Rostedt if (!rec)
38315072c59fSSteven Rostedt return 0;
38325072c59fSSteven Rostedt
383383f74441SJiri Olsa if (iter->flags & FTRACE_ITER_ADDRS)
383483f74441SJiri Olsa seq_printf(m, "%lx ", rec->ip);
383583f74441SJiri Olsa
3836b39181f7SSteven Rostedt (Google) if (print_rec(m, rec->ip)) {
3837b39181f7SSteven Rostedt (Google) /* This should only happen when a rec is disabled */
3838b39181f7SSteven Rostedt (Google) WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3839b39181f7SSteven Rostedt (Google) seq_putc(m, '\n');
3840b39181f7SSteven Rostedt (Google) return 0;
3841b39181f7SSteven Rostedt (Google) }
3842b39181f7SSteven Rostedt (Google)
3843e11b521aSSteven Rostedt (Google) if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
3844030f4e1cSSteven Rostedt (Red Hat) struct ftrace_ops *ops;
384515d5b02cSSteven Rostedt (Red Hat)
38466ce2c04fSSteven Rostedt (Google) seq_printf(m, " (%ld)%s%s%s%s%s",
38470376bde1SSteven Rostedt (Red Hat) ftrace_rec_count(rec),
3848f8b8be8aSMasami Hiramatsu rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3849763e34e7SSteven Rostedt (VMware) rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3850cbad0fb2SMark Rutland rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
38516ce2c04fSSteven Rostedt (Google) rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
38526ce2c04fSSteven Rostedt (Google) rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
38539674b2faSSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_TRAMP_EN) {
38545fecaa04SSteven Rostedt (Red Hat) ops = ftrace_find_tramp_ops_any(rec);
385539daa7b9SSteven Rostedt (Red Hat) if (ops) {
385639daa7b9SSteven Rostedt (Red Hat) do {
385739daa7b9SSteven Rostedt (Red Hat) seq_printf(m, "\ttramp: %pS (%pS)",
385839daa7b9SSteven Rostedt (Red Hat) (void *)ops->trampoline,
385939daa7b9SSteven Rostedt (Red Hat) (void *)ops->func);
3860030f4e1cSSteven Rostedt (Red Hat) add_trampoline_func(m, ops, rec);
386139daa7b9SSteven Rostedt (Red Hat) ops = ftrace_find_tramp_ops_next(rec, ops);
386239daa7b9SSteven Rostedt (Red Hat) } while (ops);
386339daa7b9SSteven Rostedt (Red Hat) } else
3864fa6f0cc7SRasmus Villemoes seq_puts(m, "\ttramp: ERROR!");
3865030f4e1cSSteven Rostedt (Red Hat) } else {
3866030f4e1cSSteven Rostedt (Red Hat) add_trampoline_func(m, NULL, rec);
38679674b2faSSteven Rostedt (Red Hat) }
3868cbad0fb2SMark Rutland if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
3869cbad0fb2SMark Rutland ops = ftrace_find_unique_ops(rec);
3870cbad0fb2SMark Rutland if (ops) {
3871cbad0fb2SMark Rutland seq_printf(m, "\tops: %pS (%pS)",
3872cbad0fb2SMark Rutland ops, ops->func);
3873cbad0fb2SMark Rutland } else {
3874cbad0fb2SMark Rutland seq_puts(m, "\tops: ERROR!");
3875cbad0fb2SMark Rutland }
3876cbad0fb2SMark Rutland }
3877763e34e7SSteven Rostedt (VMware) if (rec->flags & FTRACE_FL_DIRECT) {
3878763e34e7SSteven Rostedt (VMware) unsigned long direct;
3879763e34e7SSteven Rostedt (VMware)
3880ff205766SAlexei Starovoitov direct = ftrace_find_rec_direct(rec->ip);
3881763e34e7SSteven Rostedt (VMware) if (direct)
3882763e34e7SSteven Rostedt (VMware) seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3883763e34e7SSteven Rostedt (VMware) }
38849674b2faSSteven Rostedt (Red Hat) }
38859674b2faSSteven Rostedt (Red Hat)
3886fa6f0cc7SRasmus Villemoes seq_putc(m, '\n');
38875072c59fSSteven Rostedt
38885072c59fSSteven Rostedt return 0;
38895072c59fSSteven Rostedt }
38905072c59fSSteven Rostedt
/* seq_file operations shared by the available/enabled/touched/filter files */
389188e9d34cSJames Morris static const struct seq_operations show_ftrace_seq_ops = {
38925072c59fSSteven Rostedt .start = t_start,
38935072c59fSSteven Rostedt .next = t_next,
38945072c59fSSteven Rostedt .stop = t_stop,
38955072c59fSSteven Rostedt .show = t_show,
38965072c59fSSteven Rostedt };
38975072c59fSSteven Rostedt
/*
 * Open handler for "available_filter_functions": list every traceable
 * function.  Rejects the open under tracefs lockdown or when ftrace has
 * been disabled.  The iterator is freed by the seq_file release path.
 */
3898e309b41dSIngo Molnar static int
ftrace_avail_open(struct inode * inode,struct file * file)38995072c59fSSteven Rostedt ftrace_avail_open(struct inode *inode, struct file *file)
39005072c59fSSteven Rostedt {
39015072c59fSSteven Rostedt struct ftrace_iterator *iter;
390217911ff3SSteven Rostedt (VMware) int ret;
390317911ff3SSteven Rostedt (VMware)
390417911ff3SSteven Rostedt (VMware) ret = security_locked_down(LOCKDOWN_TRACEFS);
390517911ff3SSteven Rostedt (VMware) if (ret)
390617911ff3SSteven Rostedt (VMware) return ret;
39075072c59fSSteven Rostedt
39084eebcc81SSteven Rostedt if (unlikely(ftrace_disabled))
39094eebcc81SSteven Rostedt return -ENODEV;
39104eebcc81SSteven Rostedt
391150e18b94SJiri Olsa iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3912c1bc5919SSteven Rostedt (VMware) if (!iter)
3913c1bc5919SSteven Rostedt (VMware) return -ENOMEM;
3914c1bc5919SSteven Rostedt (VMware)
39155072c59fSSteven Rostedt iter->pg = ftrace_pages_start;
3916fc13cb0cSSteven Rostedt iter->ops = &global_ops;
39175072c59fSSteven Rostedt
3918c1bc5919SSteven Rostedt (VMware) return 0;
39195072c59fSSteven Rostedt }
39205072c59fSSteven Rostedt
/*
 * Open handler for "enabled_functions": list only records that are
 * currently enabled (FTRACE_FL_ENABLED), via FTRACE_ITER_ENABLED.
 * Deliberately not gated on lockdown — see the comment below.
 */
3921647bcd03SSteven Rostedt static int
ftrace_enabled_open(struct inode * inode,struct file * file)3922647bcd03SSteven Rostedt ftrace_enabled_open(struct inode *inode, struct file *file)
3923647bcd03SSteven Rostedt {
3924647bcd03SSteven Rostedt struct ftrace_iterator *iter;
3925647bcd03SSteven Rostedt
392617911ff3SSteven Rostedt (VMware) /*
392717911ff3SSteven Rostedt (VMware) * This shows us what functions are currently being
392817911ff3SSteven Rostedt (VMware) * traced and by what. Not sure if we want lockdown
392917911ff3SSteven Rostedt (VMware) * to hide such critical information for an admin.
393017911ff3SSteven Rostedt (VMware) * Although, perhaps it can show information we don't
393117911ff3SSteven Rostedt (VMware) * want people to see, but if something is tracing
393217911ff3SSteven Rostedt (VMware) * something, we probably want to know about it.
393317911ff3SSteven Rostedt (VMware) */
393417911ff3SSteven Rostedt (VMware)
393550e18b94SJiri Olsa iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3936c1bc5919SSteven Rostedt (VMware) if (!iter)
3937c1bc5919SSteven Rostedt (VMware) return -ENOMEM;
3938c1bc5919SSteven Rostedt (VMware)
3939647bcd03SSteven Rostedt iter->pg = ftrace_pages_start;
3940647bcd03SSteven Rostedt iter->flags = FTRACE_ITER_ENABLED;
3941fc13cb0cSSteven Rostedt iter->ops = &global_ops;
3942647bcd03SSteven Rostedt
3943c1bc5919SSteven Rostedt (VMware) return 0;
3944647bcd03SSteven Rostedt }
3945647bcd03SSteven Rostedt
/*
 * Open handler for "touched_functions": list records that have ever been
 * modified (FTRACE_FL_TOUCHED), via FTRACE_ITER_TOUCHED.  Like
 * enabled_functions, intentionally not gated on lockdown — see below.
 */
3946e11b521aSSteven Rostedt (Google) static int
ftrace_touched_open(struct inode * inode,struct file * file)3947e11b521aSSteven Rostedt (Google) ftrace_touched_open(struct inode *inode, struct file *file)
3948e11b521aSSteven Rostedt (Google) {
3949e11b521aSSteven Rostedt (Google) struct ftrace_iterator *iter;
3950e11b521aSSteven Rostedt (Google)
3951e11b521aSSteven Rostedt (Google) /*
3952e11b521aSSteven Rostedt (Google) * This shows us what functions have ever been enabled
3953e11b521aSSteven Rostedt (Google) * (traced, direct, patched, etc). Not sure if we want lockdown
3954e11b521aSSteven Rostedt (Google) * to hide such critical information for an admin.
3955e11b521aSSteven Rostedt (Google) * Although, perhaps it can show information we don't
3956e11b521aSSteven Rostedt (Google) * want people to see, but if something had traced
3957e11b521aSSteven Rostedt (Google) * something, we probably want to know about it.
3958e11b521aSSteven Rostedt (Google) */
3959e11b521aSSteven Rostedt (Google)
3960e11b521aSSteven Rostedt (Google) iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3961e11b521aSSteven Rostedt (Google) if (!iter)
3962e11b521aSSteven Rostedt (Google) return -ENOMEM;
3963e11b521aSSteven Rostedt (Google)
3964e11b521aSSteven Rostedt (Google) iter->pg = ftrace_pages_start;
3965e11b521aSSteven Rostedt (Google) iter->flags = FTRACE_ITER_TOUCHED;
3966e11b521aSSteven Rostedt (Google) iter->ops = &global_ops;
3967e11b521aSSteven Rostedt (Google)
3968e11b521aSSteven Rostedt (Google) return 0;
3969e11b521aSSteven Rostedt (Google) }
3970e11b521aSSteven Rostedt (Google)
/*
 * Open handler for "available_filter_functions_addrs": like
 * available_filter_functions but each line is prefixed with the record's
 * raw address (FTRACE_ITER_ADDRS).  Gated on lockdown and ftrace_disabled.
 */
397183f74441SJiri Olsa static int
ftrace_avail_addrs_open(struct inode * inode,struct file * file)397283f74441SJiri Olsa ftrace_avail_addrs_open(struct inode *inode, struct file *file)
397383f74441SJiri Olsa {
397483f74441SJiri Olsa struct ftrace_iterator *iter;
397583f74441SJiri Olsa int ret;
397683f74441SJiri Olsa
397783f74441SJiri Olsa ret = security_locked_down(LOCKDOWN_TRACEFS);
397883f74441SJiri Olsa if (ret)
397983f74441SJiri Olsa return ret;
398083f74441SJiri Olsa
398183f74441SJiri Olsa if (unlikely(ftrace_disabled))
398283f74441SJiri Olsa return -ENODEV;
398383f74441SJiri Olsa
398483f74441SJiri Olsa iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
398583f74441SJiri Olsa if (!iter)
398683f74441SJiri Olsa return -ENOMEM;
398783f74441SJiri Olsa
398883f74441SJiri Olsa iter->pg = ftrace_pages_start;
398983f74441SJiri Olsa iter->flags = FTRACE_ITER_ADDRS;
399083f74441SJiri Olsa iter->ops = &global_ops;
399183f74441SJiri Olsa
399283f74441SJiri Olsa return 0;
399383f74441SJiri Olsa }
399483f74441SJiri Olsa
3995fc13cb0cSSteven Rostedt /**
3996fc13cb0cSSteven Rostedt * ftrace_regex_open - initialize function tracer filter files
3997fc13cb0cSSteven Rostedt * @ops: The ftrace_ops that hold the hash filters
3998fc13cb0cSSteven Rostedt * @flag: The type of filter to process
3999fc13cb0cSSteven Rostedt * @inode: The inode, usually passed in to your open routine
4000fc13cb0cSSteven Rostedt * @file: The file, usually passed in to your open routine
4001fc13cb0cSSteven Rostedt *
4002fc13cb0cSSteven Rostedt * ftrace_regex_open() initializes the filter files for the
4003fc13cb0cSSteven Rostedt * @ops. Depending on @flag it may process the filter hash or
4004fc13cb0cSSteven Rostedt * the notrace hash of @ops. With this called from the open
4005fc13cb0cSSteven Rostedt * routine, you can use ftrace_filter_write() for the write
4006fc13cb0cSSteven Rostedt * routine if @flag has FTRACE_ITER_FILTER set, or
4007fc13cb0cSSteven Rostedt * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4008098c879eSSteven Rostedt (Red Hat) * tracing_lseek() should be used as the lseek routine, and
4009fc13cb0cSSteven Rostedt * release must call ftrace_regex_release().
4010fc13cb0cSSteven Rostedt */
4011fc13cb0cSSteven Rostedt int
ftrace_regex_open(struct ftrace_ops * ops,int flag,struct inode * inode,struct file * file)4012f45948e8SSteven Rostedt ftrace_regex_open(struct ftrace_ops *ops, int flag,
40131cf41dd7SSteven Rostedt struct inode *inode, struct file *file)
40145072c59fSSteven Rostedt {
40155072c59fSSteven Rostedt struct ftrace_iterator *iter;
4016f45948e8SSteven Rostedt struct ftrace_hash *hash;
4017673feb9dSSteven Rostedt (VMware) struct list_head *mod_head;
4018673feb9dSSteven Rostedt (VMware) struct trace_array *tr = ops->private;
40199ef16693SSteven Rostedt (VMware) int ret = -ENOMEM;
40205072c59fSSteven Rostedt
4021f04f24fbSMasami Hiramatsu ftrace_ops_init(ops);
4022f04f24fbSMasami Hiramatsu
40234eebcc81SSteven Rostedt if (unlikely(ftrace_disabled))
40244eebcc81SSteven Rostedt return -ENODEV;
40254eebcc81SSteven Rostedt
/* Takes a reference on @tr on success; dropped again on the error path below */
40268530dec6SSteven Rostedt (VMware) if (tracing_check_open_get_tr(tr))
40279ef16693SSteven Rostedt (VMware) return -ENODEV;
40289ef16693SSteven Rostedt (VMware)
40295072c59fSSteven Rostedt iter = kzalloc(sizeof(*iter), GFP_KERNEL);
40305072c59fSSteven Rostedt if (!iter)
40319ef16693SSteven Rostedt (VMware) goto out;
40325072c59fSSteven Rostedt
40339ef16693SSteven Rostedt (VMware) if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
40349ef16693SSteven Rostedt (VMware) goto out;
4035689fd8b6Sjolsa@redhat.com
40363f2367baSMasami Hiramatsu iter->ops = ops;
40373f2367baSMasami Hiramatsu iter->flags = flag;
40385985ea8bSSteven Rostedt (VMware) iter->tr = tr;
40393f2367baSMasami Hiramatsu
404033b7f99cSSteven Rostedt (Red Hat) mutex_lock(&ops->func_hash->regex_lock);
40413f2367baSMasami Hiramatsu
/* Pick the notrace or filter hash (and matching cached-module list) per @flag */
4042673feb9dSSteven Rostedt (VMware) if (flag & FTRACE_ITER_NOTRACE) {
404333b7f99cSSteven Rostedt (Red Hat) hash = ops->func_hash->notrace_hash;
40445985ea8bSSteven Rostedt (VMware) mod_head = tr ? &tr->mod_notrace : NULL;
4045673feb9dSSteven Rostedt (VMware) } else {
404633b7f99cSSteven Rostedt (Red Hat) hash = ops->func_hash->filter_hash;
40475985ea8bSSteven Rostedt (VMware) mod_head = tr ? &tr->mod_trace : NULL;
4048673feb9dSSteven Rostedt (VMware) }
4049f45948e8SSteven Rostedt
40505985ea8bSSteven Rostedt (VMware) iter->mod_list = mod_head;
40515985ea8bSSteven Rostedt (VMware)
/* Writers edit a private copy of the hash; O_TRUNC starts from an empty one */
405233dc9b12SSteven Rostedt if (file->f_mode & FMODE_WRITE) {
4053ef2fbe16SNamhyung Kim const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4054ef2fbe16SNamhyung Kim
4055673feb9dSSteven Rostedt (VMware) if (file->f_flags & O_TRUNC) {
4056ef2fbe16SNamhyung Kim iter->hash = alloc_ftrace_hash(size_bits);
4057673feb9dSSteven Rostedt (VMware) clear_ftrace_mod_list(mod_head);
4058673feb9dSSteven Rostedt (VMware) } else {
4059ef2fbe16SNamhyung Kim iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4060673feb9dSSteven Rostedt (VMware) }
4061ef2fbe16SNamhyung Kim
406233dc9b12SSteven Rostedt if (!iter->hash) {
406333dc9b12SSteven Rostedt trace_parser_put(&iter->parser);
40643f2367baSMasami Hiramatsu goto out_unlock;
406533dc9b12SSteven Rostedt }
4066c20489daSSteven Rostedt (VMware) } else
4067c20489daSSteven Rostedt (VMware) iter->hash = hash;
40681cf41dd7SSteven Rostedt
40699ef16693SSteven Rostedt (VMware) ret = 0;
40709ef16693SSteven Rostedt (VMware)
40715072c59fSSteven Rostedt if (file->f_mode & FMODE_READ) {
40725072c59fSSteven Rostedt iter->pg = ftrace_pages_start;
40735072c59fSSteven Rostedt
40745072c59fSSteven Rostedt ret = seq_open(file, &show_ftrace_seq_ops);
40755072c59fSSteven Rostedt if (!ret) {
40765072c59fSSteven Rostedt struct seq_file *m = file->private_data;
40775072c59fSSteven Rostedt m->private = iter;
407879fe249cSLi Zefan } else {
407933dc9b12SSteven Rostedt /* Failed */
408033dc9b12SSteven Rostedt free_ftrace_hash(iter->hash);
408179fe249cSLi Zefan trace_parser_put(&iter->parser);
408279fe249cSLi Zefan }
40835072c59fSSteven Rostedt } else
40845072c59fSSteven Rostedt file->private_data = iter;
40853f2367baSMasami Hiramatsu
40863f2367baSMasami Hiramatsu out_unlock:
408733b7f99cSSteven Rostedt (Red Hat) mutex_unlock(&ops->func_hash->regex_lock);
40885072c59fSSteven Rostedt
40899ef16693SSteven Rostedt (VMware) out:
/* On any failure: free iter (kfree(NULL) is a no-op) and drop the tr reference */
40909ef16693SSteven Rostedt (VMware) if (ret) {
40919ef16693SSteven Rostedt (VMware) kfree(iter);
40929ef16693SSteven Rostedt (VMware) if (tr)
40939ef16693SSteven Rostedt (VMware) trace_array_put(tr);
40949ef16693SSteven Rostedt (VMware) }
40959ef16693SSteven Rostedt (VMware)
40965072c59fSSteven Rostedt return ret;
40975072c59fSSteven Rostedt }
40985072c59fSSteven Rostedt
409941c52c0dSSteven Rostedt static int
ftrace_filter_open(struct inode * inode,struct file * file)410041c52c0dSSteven Rostedt ftrace_filter_open(struct inode *inode, struct file *file)
410141c52c0dSSteven Rostedt {
4102e3b3e2e8SSteven Rostedt (Red Hat) struct ftrace_ops *ops = inode->i_private;
4103e3b3e2e8SSteven Rostedt (Red Hat)
410417911ff3SSteven Rostedt (VMware) /* Checks for tracefs lockdown */
4105e3b3e2e8SSteven Rostedt (Red Hat) return ftrace_regex_open(ops,
4106eee8ded1SSteven Rostedt (VMware) FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
41071cf41dd7SSteven Rostedt inode, file);
410841c52c0dSSteven Rostedt }
410941c52c0dSSteven Rostedt
411041c52c0dSSteven Rostedt static int
ftrace_notrace_open(struct inode * inode,struct file * file)411141c52c0dSSteven Rostedt ftrace_notrace_open(struct inode *inode, struct file *file)
411241c52c0dSSteven Rostedt {
4113e3b3e2e8SSteven Rostedt (Red Hat) struct ftrace_ops *ops = inode->i_private;
4114e3b3e2e8SSteven Rostedt (Red Hat)
411517911ff3SSteven Rostedt (VMware) /* Checks for tracefs lockdown */
4116e3b3e2e8SSteven Rostedt (Red Hat) return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
41171cf41dd7SSteven Rostedt inode, file);
411841c52c0dSSteven Rostedt }
411941c52c0dSSteven Rostedt
41203ba00929SDmitry Safonov /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
41213ba00929SDmitry Safonov struct ftrace_glob {
41223ba00929SDmitry Safonov char *search; /* pattern text compared against symbol names */
41233ba00929SDmitry Safonov unsigned len; /* cached strlen(search) */
41243ba00929SDmitry Safonov int type; /* MATCH_* kind returned by filter_parse_regex() */
41253ba00929SDmitry Safonov };
41263ba00929SDmitry Safonov
41277132e2d6SThiago Jung Bauermann /*
41287132e2d6SThiago Jung Bauermann * If symbols in an architecture don't correspond exactly to the user-visible
41297132e2d6SThiago Jung Bauermann * name of what they represent, it is possible to define this function to
41307132e2d6SThiago Jung Bauermann * perform the necessary adjustments.
41317132e2d6SThiago Jung Bauermann */
arch_ftrace_match_adjust(char * str,const char * search)41327132e2d6SThiago Jung Bauermann char * __weak arch_ftrace_match_adjust(char *str, const char *search)
41337132e2d6SThiago Jung Bauermann {
/* Default: no adjustment; __weak lets architectures override this. */
41347132e2d6SThiago Jung Bauermann return str;
41357132e2d6SThiago Jung Bauermann }
41367132e2d6SThiago Jung Bauermann
ftrace_match(char * str,struct ftrace_glob * g)41373ba00929SDmitry Safonov static int ftrace_match(char *str, struct ftrace_glob *g)
41389f4801e3SSteven Rostedt {
41399f4801e3SSteven Rostedt int matched = 0;
4140751e9983SLi Zefan int slen;
41419f4801e3SSteven Rostedt
41427132e2d6SThiago Jung Bauermann str = arch_ftrace_match_adjust(str, g->search);
41437132e2d6SThiago Jung Bauermann
41443ba00929SDmitry Safonov switch (g->type) {
41459f4801e3SSteven Rostedt case MATCH_FULL:
41463ba00929SDmitry Safonov if (strcmp(str, g->search) == 0)
41479f4801e3SSteven Rostedt matched = 1;
41489f4801e3SSteven Rostedt break;
41499f4801e3SSteven Rostedt case MATCH_FRONT_ONLY:
41503ba00929SDmitry Safonov if (strncmp(str, g->search, g->len) == 0)
41519f4801e3SSteven Rostedt matched = 1;
41529f4801e3SSteven Rostedt break;
41539f4801e3SSteven Rostedt case MATCH_MIDDLE_ONLY:
41543ba00929SDmitry Safonov if (strstr(str, g->search))
41559f4801e3SSteven Rostedt matched = 1;
41569f4801e3SSteven Rostedt break;
41579f4801e3SSteven Rostedt case MATCH_END_ONLY:
4158751e9983SLi Zefan slen = strlen(str);
41593ba00929SDmitry Safonov if (slen >= g->len &&
41603ba00929SDmitry Safonov memcmp(str + slen - g->len, g->search, g->len) == 0)
41619f4801e3SSteven Rostedt matched = 1;
41629f4801e3SSteven Rostedt break;
416360f1d5e3SMasami Hiramatsu case MATCH_GLOB:
416460f1d5e3SMasami Hiramatsu if (glob_match(g->search, str))
416560f1d5e3SMasami Hiramatsu matched = 1;
416660f1d5e3SMasami Hiramatsu break;
41679f4801e3SSteven Rostedt }
41689f4801e3SSteven Rostedt
41699f4801e3SSteven Rostedt return matched;
41709f4801e3SSteven Rostedt }
41719f4801e3SSteven Rostedt
4172b448c4e3SSteven Rostedt static int
enter_record(struct ftrace_hash * hash,struct dyn_ftrace * rec,int clear_filter)4173f0a3b154SDmitry Safonov enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
417464e7c440SSteven Rostedt {
4175b448c4e3SSteven Rostedt struct ftrace_func_entry *entry;
4176b448c4e3SSteven Rostedt int ret = 0;
4177b448c4e3SSteven Rostedt
41781cf41dd7SSteven Rostedt entry = ftrace_lookup_ip(hash, rec->ip);
4179f0a3b154SDmitry Safonov if (clear_filter) {
4180b448c4e3SSteven Rostedt /* Do nothing if it doesn't exist */
4181b448c4e3SSteven Rostedt if (!entry)
4182b448c4e3SSteven Rostedt return 0;
4183b448c4e3SSteven Rostedt
418433dc9b12SSteven Rostedt free_hash_entry(hash, entry);
4185b448c4e3SSteven Rostedt } else {
4186b448c4e3SSteven Rostedt /* Do nothing if it exists */
4187b448c4e3SSteven Rostedt if (entry)
4188b448c4e3SSteven Rostedt return 0;
4189a12754a8SSteven Rostedt (Google) if (add_hash_entry(hash, rec->ip) == NULL)
4190a12754a8SSteven Rostedt (Google) ret = -ENOMEM;
4191b448c4e3SSteven Rostedt }
4192b448c4e3SSteven Rostedt return ret;
4193996e87beSSteven Rostedt }
4194996e87beSSteven Rostedt
/*
 * Select a single record by its 1-based position across all ftrace pages
 * (the pattern parsed as MATCH_INDEX). Returns 1 if the record was entered
 * into @hash, 0 if the index was out of range.
 */
419564e7c440SSteven Rostedt static int
add_rec_by_index(struct ftrace_hash * hash,struct ftrace_glob * func_g,int clear_filter)4196f79b3f33SSteven Rostedt (VMware) add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4197f79b3f33SSteven Rostedt (VMware) int clear_filter)
4198f79b3f33SSteven Rostedt (VMware) {
4199f79b3f33SSteven Rostedt (VMware) long index = simple_strtoul(func_g->search, NULL, 0);
4200f79b3f33SSteven Rostedt (VMware) struct ftrace_page *pg;
4201f79b3f33SSteven Rostedt (VMware) struct dyn_ftrace *rec;
4202f79b3f33SSteven Rostedt (VMware)
4203f79b3f33SSteven Rostedt (VMware) /* The index starts at 1 */
4204f79b3f33SSteven Rostedt (VMware) if (--index < 0)
4205f79b3f33SSteven Rostedt (VMware) return 0;
4206f79b3f33SSteven Rostedt (VMware)
4207f79b3f33SSteven Rostedt (VMware) do_for_each_ftrace_rec(pg, rec) {
4208f79b3f33SSteven Rostedt (VMware) if (pg->index <= index) {
4209f79b3f33SSteven Rostedt (VMware) index -= pg->index;
4210f79b3f33SSteven Rostedt (VMware) /* this is a double loop, break goes to the next page */
4211f79b3f33SSteven Rostedt (VMware) break;
4212f79b3f33SSteven Rostedt (VMware) }
4213f79b3f33SSteven Rostedt (VMware) rec = &pg->records[index];
4214f79b3f33SSteven Rostedt (VMware) enter_record(hash, rec, clear_filter);
4215f79b3f33SSteven Rostedt (VMware) return 1;
4216f79b3f33SSteven Rostedt (VMware) } while_for_each_ftrace_rec();
4217f79b3f33SSteven Rostedt (VMware) return 0;
4218f79b3f33SSteven Rostedt (VMware) }
4219f79b3f33SSteven Rostedt (VMware)
/*
 * Resolve @ip to its symbol name (into @str) and module name. When
 * FTRACE_MCOUNT_MAX_OFFSET is defined, reject addresses that fall too far
 * past the symbol start, returning -1; otherwise always succeed.
 */
4220b39181f7SSteven Rostedt (Google) #ifdef FTRACE_MCOUNT_MAX_OFFSET
lookup_ip(unsigned long ip,char ** modname,char * str)4221b39181f7SSteven Rostedt (Google) static int lookup_ip(unsigned long ip, char **modname, char *str)
4222b39181f7SSteven Rostedt (Google) {
4223b39181f7SSteven Rostedt (Google) unsigned long offset;
4224b39181f7SSteven Rostedt (Google)
4225b39181f7SSteven Rostedt (Google) kallsyms_lookup(ip, NULL, &offset, modname, str);
4226b39181f7SSteven Rostedt (Google) if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4227b39181f7SSteven Rostedt (Google) return -1;
4228b39181f7SSteven Rostedt (Google) return 0;
4229b39181f7SSteven Rostedt (Google) }
4230b39181f7SSteven Rostedt (Google) #else
lookup_ip(unsigned long ip,char ** modname,char * str)4231b39181f7SSteven Rostedt (Google) static int lookup_ip(unsigned long ip, char **modname, char *str)
4232b39181f7SSteven Rostedt (Google) {
4233b39181f7SSteven Rostedt (Google) kallsyms_lookup(ip, NULL, NULL, modname, str);
4234b39181f7SSteven Rostedt (Google) return 0;
4235b39181f7SSteven Rostedt (Google) }
4236b39181f7SSteven Rostedt (Google) #endif
4237b39181f7SSteven Rostedt (Google)
/*
 * Return nonzero if @rec matches @func_g, and - when @mod_g is given -
 * the module constraint as well (inverted when @exclude_mod is set).
 */
4238f79b3f33SSteven Rostedt (VMware) static int
ftrace_match_record(struct dyn_ftrace * rec,struct ftrace_glob * func_g,struct ftrace_glob * mod_g,int exclude_mod)42390b507e1eSDmitry Safonov ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
42400b507e1eSDmitry Safonov struct ftrace_glob *mod_g, int exclude_mod)
424164e7c440SSteven Rostedt {
424264e7c440SSteven Rostedt char str[KSYM_SYMBOL_LEN];
4243b9df92d2SSteven Rostedt char *modname;
424464e7c440SSteven Rostedt
4245b39181f7SSteven Rostedt (Google) if (lookup_ip(rec->ip, &modname, str)) {
4246b39181f7SSteven Rostedt (Google) /* This should only happen when a rec is disabled */
4247b39181f7SSteven Rostedt (Google) WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4248b39181f7SSteven Rostedt (Google) !(rec->flags & FTRACE_FL_DISABLED));
4249b39181f7SSteven Rostedt (Google) return 0;
4250b39181f7SSteven Rostedt (Google) }
4251b9df92d2SSteven Rostedt
42520b507e1eSDmitry Safonov if (mod_g) {
42530b507e1eSDmitry Safonov int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
42540b507e1eSDmitry Safonov
42550b507e1eSDmitry Safonov /* blank module name to match all modules */
42560b507e1eSDmitry Safonov if (!mod_g->len) {
42570b507e1eSDmitry Safonov /* blank module globbing: modname xor exclude_mod */
425877c0eddeSSteven Rostedt (VMware) if (!exclude_mod != !modname)
42590b507e1eSDmitry Safonov goto func_match;
42600b507e1eSDmitry Safonov return 0;
42610b507e1eSDmitry Safonov }
42620b507e1eSDmitry Safonov
426377c0eddeSSteven Rostedt (VMware) /*
426477c0eddeSSteven Rostedt (VMware) * exclude_mod is set to trace everything but the given
426577c0eddeSSteven Rostedt (VMware) * module. If it is set and the module matches, then
426677c0eddeSSteven Rostedt (VMware) * return 0. If it is not set, and the module doesn't match
426777c0eddeSSteven Rostedt (VMware) * also return 0. Otherwise, check the function to see if
426877c0eddeSSteven Rostedt (VMware) * that matches.
426977c0eddeSSteven Rostedt (VMware) */
427077c0eddeSSteven Rostedt (VMware) if (!mod_matches == !exclude_mod)
42710b507e1eSDmitry Safonov return 0;
42720b507e1eSDmitry Safonov func_match:
4273b9df92d2SSteven Rostedt /* blank search means to match all funcs in the mod */
42743ba00929SDmitry Safonov if (!func_g->len)
4275b9df92d2SSteven Rostedt return 1;
4276b9df92d2SSteven Rostedt }
4277b9df92d2SSteven Rostedt
42783ba00929SDmitry Safonov return ftrace_match(str, func_g);
427964e7c440SSteven Rostedt }
428064e7c440SSteven Rostedt
/*
 * Walk every ftrace record and enter (or, for negated patterns, remove)
 * the ones matching @func (optionally restricted to module @mod) in @hash.
 * Returns 1 if anything matched, 0 if nothing did, negative errno on error.
 */
42811cf41dd7SSteven Rostedt static int
match_records(struct ftrace_hash * hash,char * func,int len,char * mod)42823ba00929SDmitry Safonov match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
42839f4801e3SSteven Rostedt {
42849f4801e3SSteven Rostedt struct ftrace_page *pg;
42859f4801e3SSteven Rostedt struct dyn_ftrace *rec;
42863ba00929SDmitry Safonov struct ftrace_glob func_g = { .type = MATCH_FULL };
42870b507e1eSDmitry Safonov struct ftrace_glob mod_g = { .type = MATCH_FULL };
42880b507e1eSDmitry Safonov struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
42890b507e1eSDmitry Safonov int exclude_mod = 0;
4290311d16daSLi Zefan int found = 0;
4291b448c4e3SSteven Rostedt int ret;
42922e028c4fSDan Carpenter int clear_filter = 0;
42939f4801e3SSteven Rostedt
42940b507e1eSDmitry Safonov if (func) {
42953ba00929SDmitry Safonov func_g.type = filter_parse_regex(func, len, &func_g.search,
42963ba00929SDmitry Safonov &clear_filter);
42973ba00929SDmitry Safonov func_g.len = strlen(func_g.search);
4298b9df92d2SSteven Rostedt }
4299b9df92d2SSteven Rostedt
43000b507e1eSDmitry Safonov if (mod) {
43010b507e1eSDmitry Safonov mod_g.type = filter_parse_regex(mod, strlen(mod),
43020b507e1eSDmitry Safonov &mod_g.search, &exclude_mod);
43030b507e1eSDmitry Safonov mod_g.len = strlen(mod_g.search);
4304b9df92d2SSteven Rostedt }
4305b9df92d2SSteven Rostedt
430652baf119SSteven Rostedt mutex_lock(&ftrace_lock);
4307b9df92d2SSteven Rostedt
4308b9df92d2SSteven Rostedt if (unlikely(ftrace_disabled))
4309b9df92d2SSteven Rostedt goto out_unlock;
4310b9df92d2SSteven Rostedt
/* A purely numeric pattern selects one record by position, not by name */
4311f79b3f33SSteven Rostedt (VMware) if (func_g.type == MATCH_INDEX) {
4312f79b3f33SSteven Rostedt (VMware) found = add_rec_by_index(hash, &func_g, clear_filter);
4313f79b3f33SSteven Rostedt (VMware) goto out_unlock;
4314f79b3f33SSteven Rostedt (VMware) }
4315f79b3f33SSteven Rostedt (VMware)
4316265c831cSSteven Rostedt do_for_each_ftrace_rec(pg, rec) {
4317546fece4SSteven Rostedt (Red Hat)
4318546fece4SSteven Rostedt (Red Hat) if (rec->flags & FTRACE_FL_DISABLED)
4319546fece4SSteven Rostedt (Red Hat) continue;
4320546fece4SSteven Rostedt (Red Hat)
43210b507e1eSDmitry Safonov if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4322f0a3b154SDmitry Safonov ret = enter_record(hash, rec, clear_filter);
4323b448c4e3SSteven Rostedt if (ret < 0) {
4324b448c4e3SSteven Rostedt found = ret;
4325b448c4e3SSteven Rostedt goto out_unlock;
4326b448c4e3SSteven Rostedt }
4327311d16daSLi Zefan found = 1;
43285072c59fSSteven Rostedt }
/* The record walk can be long; yield the CPU between records */
4329d0b24b4eSGuilherme G. Piccoli cond_resched();
4330265c831cSSteven Rostedt } while_for_each_ftrace_rec();
4331b9df92d2SSteven Rostedt out_unlock:
433252baf119SSteven Rostedt mutex_unlock(&ftrace_lock);
4333311d16daSLi Zefan
4334311d16daSLi Zefan return found;
43355072c59fSSteven Rostedt }
43365072c59fSSteven Rostedt
/* Convenience wrapper around match_records() with no module qualifier. */
433764e7c440SSteven Rostedt static int
ftrace_match_records(struct ftrace_hash * hash,char * buff,int len)43381cf41dd7SSteven Rostedt ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
433964e7c440SSteven Rostedt {
4340f0a3b154SDmitry Safonov return match_records(hash, buff, len, NULL);
434164e7c440SSteven Rostedt }
434264e7c440SSteven Rostedt
/*
 * After @ops' hash changed, re-patch the traced call sites if @ops (or,
 * for the shared global hash, any enabled ops using it) is live.
 */
ftrace_ops_update_code(struct ftrace_ops * ops,struct ftrace_ops_hash * old_hash)4343e16b35ddSSteven Rostedt (VMware) static void ftrace_ops_update_code(struct ftrace_ops *ops,
4344e16b35ddSSteven Rostedt (VMware) struct ftrace_ops_hash *old_hash)
4345e16b35ddSSteven Rostedt (VMware) {
4346e16b35ddSSteven Rostedt (VMware) struct ftrace_ops *op;
4347e16b35ddSSteven Rostedt (VMware)
4348e16b35ddSSteven Rostedt (VMware) if (!ftrace_enabled)
4349e16b35ddSSteven Rostedt (VMware) return;
4350e16b35ddSSteven Rostedt (VMware)
4351e16b35ddSSteven Rostedt (VMware) if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4352e16b35ddSSteven Rostedt (VMware) ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4353e16b35ddSSteven Rostedt (VMware) return;
4354e16b35ddSSteven Rostedt (VMware) }
4355e16b35ddSSteven Rostedt (VMware)
4356e16b35ddSSteven Rostedt (VMware) /*
4357e16b35ddSSteven Rostedt (VMware) * If this is the shared global_ops filter, then we need to
4358e16b35ddSSteven Rostedt (VMware) * check if there is another ops that shares it, is enabled.
4359e16b35ddSSteven Rostedt (VMware) * If so, we still need to run the modify code.
4360e16b35ddSSteven Rostedt (VMware) */
4361e16b35ddSSteven Rostedt (VMware) if (ops->func_hash != &global_ops.local_hash)
4362e16b35ddSSteven Rostedt (VMware) return;
4363e16b35ddSSteven Rostedt (VMware)
4364e16b35ddSSteven Rostedt (VMware) do_for_each_ftrace_op(op, ftrace_ops_list) {
4365e16b35ddSSteven Rostedt (VMware) if (op->func_hash == &global_ops.local_hash &&
4366e16b35ddSSteven Rostedt (VMware) op->flags & FTRACE_OPS_FL_ENABLED) {
4367e16b35ddSSteven Rostedt (VMware) ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4368e16b35ddSSteven Rostedt (VMware) /* Only need to do this once */
4369e16b35ddSSteven Rostedt (VMware) return;
4370e16b35ddSSteven Rostedt (VMware) }
4371e16b35ddSSteven Rostedt (VMware) } while_for_each_ftrace_op(op);
4372e16b35ddSSteven Rostedt (VMware) }
4373e16b35ddSSteven Rostedt (VMware)
/*
 * Replace *orig_hash with @hash, re-patch affected call sites, and free
 * the old hash via RCU only if the move succeeded. Returns 0 or errno
 * from ftrace_hash_move().
 */
ftrace_hash_move_and_update_ops(struct ftrace_ops * ops,struct ftrace_hash ** orig_hash,struct ftrace_hash * hash,int enable)4374e16b35ddSSteven Rostedt (VMware) static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4375e16b35ddSSteven Rostedt (VMware) struct ftrace_hash **orig_hash,
4376e16b35ddSSteven Rostedt (VMware) struct ftrace_hash *hash,
4377e16b35ddSSteven Rostedt (VMware) int enable)
4378e16b35ddSSteven Rostedt (VMware) {
4379e16b35ddSSteven Rostedt (VMware) struct ftrace_ops_hash old_hash_ops;
4380e16b35ddSSteven Rostedt (VMware) struct ftrace_hash *old_hash;
4381e16b35ddSSteven Rostedt (VMware) int ret;
4382e16b35ddSSteven Rostedt (VMware)
4383e16b35ddSSteven Rostedt (VMware) old_hash = *orig_hash;
/* Snapshot both hashes before the move so the update sees the old state */
4384e16b35ddSSteven Rostedt (VMware) old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4385e16b35ddSSteven Rostedt (VMware) old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4386e16b35ddSSteven Rostedt (VMware) ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4387e16b35ddSSteven Rostedt (VMware) if (!ret) {
4388e16b35ddSSteven Rostedt (VMware) ftrace_ops_update_code(ops, &old_hash_ops);
4389e16b35ddSSteven Rostedt (VMware) free_ftrace_hash_rcu(old_hash);
4390e16b35ddSSteven Rostedt (VMware) }
4391e16b35ddSSteven Rostedt (VMware) return ret;
4392e16b35ddSSteven Rostedt (VMware) }
439364e7c440SSteven Rostedt
module_exists(const char * module)4394673feb9dSSteven Rostedt (VMware) static bool module_exists(const char *module)
4395673feb9dSSteven Rostedt (VMware) {
4396673feb9dSSteven Rostedt (VMware) /* All modules have the symbol __this_module */
43970f5e5a3aSRasmus Villemoes static const char this_mod[] = "__this_module";
4398419e9fe5SSalvatore Mesoraca char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4399673feb9dSSteven Rostedt (VMware) unsigned long val;
4400673feb9dSSteven Rostedt (VMware) int n;
4401673feb9dSSteven Rostedt (VMware)
4402419e9fe5SSalvatore Mesoraca n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4403673feb9dSSteven Rostedt (VMware)
4404419e9fe5SSalvatore Mesoraca if (n > sizeof(modname) - 1)
4405673feb9dSSteven Rostedt (VMware) return false;
4406673feb9dSSteven Rostedt (VMware)
4407673feb9dSSteven Rostedt (VMware) val = module_kallsyms_lookup_name(modname);
4408673feb9dSSteven Rostedt (VMware) return val != 0;
4409673feb9dSSteven Rostedt (VMware) }
4410673feb9dSSteven Rostedt (VMware)
/*
 * Cache a function filter string for @module so it can be applied when the
 * module loads. A leading '!' on @func instead removes matching cached
 * entries. Returns 0 on success, -EINVAL when nothing applies (module
 * already loaded, or no cached entry found to remove).
 */
cache_mod(struct trace_array * tr,const char * func,char * module,int enable)4411673feb9dSSteven Rostedt (VMware) static int cache_mod(struct trace_array *tr,
4412673feb9dSSteven Rostedt (VMware) const char *func, char *module, int enable)
4413673feb9dSSteven Rostedt (VMware) {
4414673feb9dSSteven Rostedt (VMware) struct ftrace_mod_load *ftrace_mod, *n;
4415673feb9dSSteven Rostedt (VMware) struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4416673feb9dSSteven Rostedt (VMware) int ret;
4417673feb9dSSteven Rostedt (VMware)
4418673feb9dSSteven Rostedt (VMware) mutex_lock(&ftrace_lock);
4419673feb9dSSteven Rostedt (VMware)
4420673feb9dSSteven Rostedt (VMware) /* We do not cache inverse filters */
4421673feb9dSSteven Rostedt (VMware) if (func[0] == '!') {
4422673feb9dSSteven Rostedt (VMware) func++;
4423673feb9dSSteven Rostedt (VMware) ret = -EINVAL;
4424673feb9dSSteven Rostedt (VMware)
4425673feb9dSSteven Rostedt (VMware) /* Look to remove this hash */
4426673feb9dSSteven Rostedt (VMware) list_for_each_entry_safe(ftrace_mod, n, head, list) {
4427673feb9dSSteven Rostedt (VMware) if (strcmp(ftrace_mod->module, module) != 0)
4428673feb9dSSteven Rostedt (VMware) continue;
4429673feb9dSSteven Rostedt (VMware)
4430673feb9dSSteven Rostedt (VMware) /* no func matches all */
443144925dffSDan Carpenter if (strcmp(func, "*") == 0 ||
4432673feb9dSSteven Rostedt (VMware) (ftrace_mod->func &&
4433673feb9dSSteven Rostedt (VMware) strcmp(ftrace_mod->func, func) == 0)) {
4434673feb9dSSteven Rostedt (VMware) ret = 0;
4435673feb9dSSteven Rostedt (VMware) free_ftrace_mod(ftrace_mod);
4436673feb9dSSteven Rostedt (VMware) continue;
4437673feb9dSSteven Rostedt (VMware) }
4438673feb9dSSteven Rostedt (VMware) }
4439673feb9dSSteven Rostedt (VMware) goto out;
4440673feb9dSSteven Rostedt (VMware) }
4441673feb9dSSteven Rostedt (VMware)
4442673feb9dSSteven Rostedt (VMware) ret = -EINVAL;
4443673feb9dSSteven Rostedt (VMware) /* We only care about modules that have not been loaded yet */
4444673feb9dSSteven Rostedt (VMware) if (module_exists(module))
4445673feb9dSSteven Rostedt (VMware) goto out;
4446673feb9dSSteven Rostedt (VMware)
4447673feb9dSSteven Rostedt (VMware) /* Save this string off, and execute it when the module is loaded */
4448673feb9dSSteven Rostedt (VMware) ret = ftrace_add_mod(tr, func, module, enable);
4449673feb9dSSteven Rostedt (VMware) out:
4450673feb9dSSteven Rostedt (VMware) mutex_unlock(&ftrace_lock);
4451673feb9dSSteven Rostedt (VMware)
4452673feb9dSSteven Rostedt (VMware) return ret;
4453673feb9dSSteven Rostedt (VMware) }
4454673feb9dSSteven Rostedt (VMware)
4455d7fbf8dfSSteven Rostedt (VMware) static int
4456d7fbf8dfSSteven Rostedt (VMware) ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4457d7fbf8dfSSteven Rostedt (VMware) int reset, int enable);
4458d7fbf8dfSSteven Rostedt (VMware)
445969449bbdSArnd Bergmann #ifdef CONFIG_MODULES
/*
 * Apply the cached filter entries for module @mod on @head to @ops'
 * filter (enable) or notrace hash. Matching entries are first moved to a
 * private list under ftrace_lock, then ftrace_lock is dropped while
 * match_records() runs (it takes ftrace_lock itself).
 */
process_mod_list(struct list_head * head,struct ftrace_ops * ops,char * mod,bool enable)4460d7fbf8dfSSteven Rostedt (VMware) static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4461d7fbf8dfSSteven Rostedt (VMware) char *mod, bool enable)
4462d7fbf8dfSSteven Rostedt (VMware) {
4463d7fbf8dfSSteven Rostedt (VMware) struct ftrace_mod_load *ftrace_mod, *n;
4464d7fbf8dfSSteven Rostedt (VMware) struct ftrace_hash **orig_hash, *new_hash;
4465d7fbf8dfSSteven Rostedt (VMware) LIST_HEAD(process_mods);
4466d7fbf8dfSSteven Rostedt (VMware) char *func;
4467d7fbf8dfSSteven Rostedt (VMware)
4468d7fbf8dfSSteven Rostedt (VMware) mutex_lock(&ops->func_hash->regex_lock);
4469d7fbf8dfSSteven Rostedt (VMware)
4470d7fbf8dfSSteven Rostedt (VMware) if (enable)
4471d7fbf8dfSSteven Rostedt (VMware) orig_hash = &ops->func_hash->filter_hash;
4472d7fbf8dfSSteven Rostedt (VMware) else
4473d7fbf8dfSSteven Rostedt (VMware) orig_hash = &ops->func_hash->notrace_hash;
4474d7fbf8dfSSteven Rostedt (VMware)
4475d7fbf8dfSSteven Rostedt (VMware) new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4476d7fbf8dfSSteven Rostedt (VMware) *orig_hash);
4477d7fbf8dfSSteven Rostedt (VMware) if (!new_hash)
44783b58a3c7SSteven Rostedt (VMware) goto out; /* warn? */
4479d7fbf8dfSSteven Rostedt (VMware)
4480d7fbf8dfSSteven Rostedt (VMware) mutex_lock(&ftrace_lock);
4481d7fbf8dfSSteven Rostedt (VMware)
4482d7fbf8dfSSteven Rostedt (VMware) list_for_each_entry_safe(ftrace_mod, n, head, list) {
4483d7fbf8dfSSteven Rostedt (VMware)
4484d7fbf8dfSSteven Rostedt (VMware) if (strcmp(ftrace_mod->module, mod) != 0)
4485d7fbf8dfSSteven Rostedt (VMware) continue;
4486d7fbf8dfSSteven Rostedt (VMware)
4487d7fbf8dfSSteven Rostedt (VMware) if (ftrace_mod->func)
4488d7fbf8dfSSteven Rostedt (VMware) func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4489d7fbf8dfSSteven Rostedt (VMware) else
4490d7fbf8dfSSteven Rostedt (VMware) func = kstrdup("*", GFP_KERNEL);
4491d7fbf8dfSSteven Rostedt (VMware)
4492d7fbf8dfSSteven Rostedt (VMware) if (!func) /* warn? */
4493d7fbf8dfSSteven Rostedt (VMware) continue;
4494d7fbf8dfSSteven Rostedt (VMware)
44953ecda644SBaokun Li list_move(&ftrace_mod->list, &process_mods);
4496d7fbf8dfSSteven Rostedt (VMware)
4497d7fbf8dfSSteven Rostedt (VMware) /* Use the newly allocated func, as it may be "*" */
4498d7fbf8dfSSteven Rostedt (VMware) kfree(ftrace_mod->func);
4499d7fbf8dfSSteven Rostedt (VMware) ftrace_mod->func = func;
4500d7fbf8dfSSteven Rostedt (VMware) }
4501d7fbf8dfSSteven Rostedt (VMware)
4502d7fbf8dfSSteven Rostedt (VMware) mutex_unlock(&ftrace_lock);
4503d7fbf8dfSSteven Rostedt (VMware)
4504d7fbf8dfSSteven Rostedt (VMware) list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4505d7fbf8dfSSteven Rostedt (VMware)
4506d7fbf8dfSSteven Rostedt (VMware) func = ftrace_mod->func;
4507d7fbf8dfSSteven Rostedt (VMware)
4508d7fbf8dfSSteven Rostedt (VMware) /* Grabs ftrace_lock, which is why we have this extra step */
4509d7fbf8dfSSteven Rostedt (VMware) match_records(new_hash, func, strlen(func), mod);
4510d7fbf8dfSSteven Rostedt (VMware) free_ftrace_mod(ftrace_mod);
4511d7fbf8dfSSteven Rostedt (VMware) }
4512d7fbf8dfSSteven Rostedt (VMware)
/* Last cached entry consumed: clear the "has cached modules" flag */
45138c08f0d5SSteven Rostedt (VMware) if (enable && list_empty(head))
45148c08f0d5SSteven Rostedt (VMware) new_hash->flags &= ~FTRACE_HASH_FL_MOD;
45158c08f0d5SSteven Rostedt (VMware)
4516d7fbf8dfSSteven Rostedt (VMware) mutex_lock(&ftrace_lock);
4517d7fbf8dfSSteven Rostedt (VMware)
4518045e269cSAlex Shi ftrace_hash_move_and_update_ops(ops, orig_hash,
4519d7fbf8dfSSteven Rostedt (VMware) new_hash, enable);
4520d7fbf8dfSSteven Rostedt (VMware) mutex_unlock(&ftrace_lock);
4521d7fbf8dfSSteven Rostedt (VMware)
45223b58a3c7SSteven Rostedt (VMware) out:
4523d7fbf8dfSSteven Rostedt (VMware) mutex_unlock(&ops->func_hash->regex_lock);
4524d7fbf8dfSSteven Rostedt (VMware)
4525d7fbf8dfSSteven Rostedt (VMware) free_ftrace_hash(new_hash);
4526d7fbf8dfSSteven Rostedt (VMware) }
4527d7fbf8dfSSteven Rostedt (VMware)
process_cached_mods(const char * mod_name)4528d7fbf8dfSSteven Rostedt (VMware) static void process_cached_mods(const char *mod_name)
4529d7fbf8dfSSteven Rostedt (VMware) {
4530d7fbf8dfSSteven Rostedt (VMware) struct trace_array *tr;
4531d7fbf8dfSSteven Rostedt (VMware) char *mod;
4532d7fbf8dfSSteven Rostedt (VMware)
4533d7fbf8dfSSteven Rostedt (VMware) mod = kstrdup(mod_name, GFP_KERNEL);
4534d7fbf8dfSSteven Rostedt (VMware) if (!mod)
4535d7fbf8dfSSteven Rostedt (VMware) return;
4536d7fbf8dfSSteven Rostedt (VMware)
4537d7fbf8dfSSteven Rostedt (VMware) mutex_lock(&trace_types_lock);
4538d7fbf8dfSSteven Rostedt (VMware) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4539d7fbf8dfSSteven Rostedt (VMware) if (!list_empty(&tr->mod_trace))
4540d7fbf8dfSSteven Rostedt (VMware) process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4541d7fbf8dfSSteven Rostedt (VMware) if (!list_empty(&tr->mod_notrace))
4542d7fbf8dfSSteven Rostedt (VMware) process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4543d7fbf8dfSSteven Rostedt (VMware) }
4544d7fbf8dfSSteven Rostedt (VMware) mutex_unlock(&trace_types_lock);
4545d7fbf8dfSSteven Rostedt (VMware)
4546d7fbf8dfSSteven Rostedt (VMware) kfree(mod);
4547d7fbf8dfSSteven Rostedt (VMware) }
454869449bbdSArnd Bergmann #endif
4549d7fbf8dfSSteven Rostedt (VMware)
4550f6180773SSteven Rostedt /*
4551f6180773SSteven Rostedt * We register the module command as a template to show others how
4552f6180773SSteven Rostedt * to register the a command as well.
4553f6180773SSteven Rostedt */
4554f6180773SSteven Rostedt
/*
 * Handler for the ":mod:" filter command: restrict matching of @func_orig
 * to functions in @module, caching the request if the module is not yet
 * loaded.
 */
4555f6180773SSteven Rostedt static int
ftrace_mod_callback(struct trace_array * tr,struct ftrace_hash * hash,char * func_orig,char * cmd,char * module,int enable)455604ec7bb6SSteven Rostedt (VMware) ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4557673feb9dSSteven Rostedt (VMware) char *func_orig, char *cmd, char *module, int enable)
4558f6180773SSteven Rostedt {
4559673feb9dSSteven Rostedt (VMware) char *func;
45605e3949f0SDmitry Safonov int ret;
4561f6180773SSteven Rostedt
45628a92dc4dSguoweikang if (!tr)
45638a92dc4dSguoweikang return -ENODEV;
45648a92dc4dSguoweikang
4565673feb9dSSteven Rostedt (VMware) /* match_records() modifies func, and we need the original */
4566673feb9dSSteven Rostedt (VMware) func = kstrdup(func_orig, GFP_KERNEL);
4567673feb9dSSteven Rostedt (VMware) if (!func)
4568673feb9dSSteven Rostedt (VMware) return -ENOMEM;
4569673feb9dSSteven Rostedt (VMware)
4570f6180773SSteven Rostedt /*
4571f6180773SSteven Rostedt * cmd == 'mod' because we only registered this func
4572f6180773SSteven Rostedt * for the 'mod' ftrace_func_command.
4573f6180773SSteven Rostedt * But if you register one func with multiple commands,
4574f6180773SSteven Rostedt * you can tell which command was used by the cmd
4575f6180773SSteven Rostedt * parameter.
4576f6180773SSteven Rostedt */
4577f0a3b154SDmitry Safonov ret = match_records(hash, func, strlen(func), module);
4578673feb9dSSteven Rostedt (VMware) kfree(func);
4579673feb9dSSteven Rostedt (VMware)
/* No match now: cache it to be applied when the module loads */
4580b448c4e3SSteven Rostedt if (!ret)
4581673feb9dSSteven Rostedt (VMware) return cache_mod(tr, func_orig, module, enable);
4582b448c4e3SSteven Rostedt if (ret < 0)
4583b448c4e3SSteven Rostedt return ret;
4584f6180773SSteven Rostedt return 0;
4585f6180773SSteven Rostedt }
4586f6180773SSteven Rostedt
/* The "mod" command: limit a filter to functions of a given module */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};
4591f6180773SSteven Rostedt
/* Register the "mod" command at boot */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
4597f6180773SSteven Rostedt
/*
 * Trampoline callback for function probes.  @op is embedded in a
 * struct ftrace_func_probe, so container_of() recovers the probe and
 * from it the user-supplied probe_ops whose func() is invoked.
 */
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct ftrace_probe_ops *probe_ops;
	struct ftrace_func_probe *probe;

	probe = container_of(op, struct ftrace_func_probe, ops);
	probe_ops = probe->probe_ops;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
	preempt_enable_notrace();
}
461659df055fSSteven Rostedt
/*
 * One ip -> data mapping.  The embedded ftrace_func_entry MUST be the
 * first member so a hash lookup result can be cast to this type.
 */
struct ftrace_func_map {
	struct ftrace_func_entry	entry;
	void				*data;
};
462159df055fSSteven Rostedt
/* A mapper is just an ftrace_hash holding ftrace_func_map entries */
struct ftrace_func_mapper {
	struct ftrace_hash		hash;
};
462559df055fSSteven Rostedt
/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data,
 * or NULL if the allocation failed.
 */
struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
{
	struct ftrace_hash *hash;

	/*
	 * The mapper is simply a ftrace_hash, but since the entries
	 * in the hash are not ftrace_func_entry type, we define it
	 * as a separate structure.
	 */
	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	return (struct ftrace_func_mapper *)hash;
}
4643e1df4cb6SSteven Rostedt (Red Hat)
464441794f19SSteven Rostedt (VMware) /**
464541794f19SSteven Rostedt (VMware) * ftrace_func_mapper_find_ip - Find some data mapped to an ip
464641794f19SSteven Rostedt (VMware) * @mapper: The mapper that has the ip maps
464741794f19SSteven Rostedt (VMware) * @ip: the instruction pointer to find the data for
464841794f19SSteven Rostedt (VMware) *
464941794f19SSteven Rostedt (VMware) * Returns the data mapped to @ip if found otherwise NULL. The return
465041794f19SSteven Rostedt (VMware) * is actually the address of the mapper data pointer. The address is
465141794f19SSteven Rostedt (VMware) * returned for use cases where the data is no bigger than a long, and
465241794f19SSteven Rostedt (VMware) * the user can use the data pointer as its data instead of having to
465341794f19SSteven Rostedt (VMware) * allocate more memory for the reference.
465441794f19SSteven Rostedt (VMware) */
ftrace_func_mapper_find_ip(struct ftrace_func_mapper * mapper,unsigned long ip)465541794f19SSteven Rostedt (VMware) void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
465641794f19SSteven Rostedt (VMware) unsigned long ip)
465741794f19SSteven Rostedt (VMware) {
465841794f19SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
465941794f19SSteven Rostedt (VMware) struct ftrace_func_map *map;
466041794f19SSteven Rostedt (VMware)
466141794f19SSteven Rostedt (VMware) entry = ftrace_lookup_ip(&mapper->hash, ip);
466241794f19SSteven Rostedt (VMware) if (!entry)
466341794f19SSteven Rostedt (VMware) return NULL;
466441794f19SSteven Rostedt (VMware)
466541794f19SSteven Rostedt (VMware) map = (struct ftrace_func_map *)entry;
466641794f19SSteven Rostedt (VMware) return &map->data;
4667e1df4cb6SSteven Rostedt (Red Hat) }
466845a4a237SSteven Rostedt
466941794f19SSteven Rostedt (VMware) /**
467041794f19SSteven Rostedt (VMware) * ftrace_func_mapper_add_ip - Map some data to an ip
467141794f19SSteven Rostedt (VMware) * @mapper: The mapper that has the ip maps
467241794f19SSteven Rostedt (VMware) * @ip: The instruction pointer address to map @data to
467341794f19SSteven Rostedt (VMware) * @data: The data to map to @ip
467441794f19SSteven Rostedt (VMware) *
4675fdda88d3SQiujun Huang * Returns 0 on success otherwise an error.
467641794f19SSteven Rostedt (VMware) */
ftrace_func_mapper_add_ip(struct ftrace_func_mapper * mapper,unsigned long ip,void * data)467741794f19SSteven Rostedt (VMware) int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
467841794f19SSteven Rostedt (VMware) unsigned long ip, void *data)
467941794f19SSteven Rostedt (VMware) {
468041794f19SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
468141794f19SSteven Rostedt (VMware) struct ftrace_func_map *map;
468241794f19SSteven Rostedt (VMware)
468341794f19SSteven Rostedt (VMware) entry = ftrace_lookup_ip(&mapper->hash, ip);
468441794f19SSteven Rostedt (VMware) if (entry)
468541794f19SSteven Rostedt (VMware) return -EBUSY;
468641794f19SSteven Rostedt (VMware)
468741794f19SSteven Rostedt (VMware) map = kmalloc(sizeof(*map), GFP_KERNEL);
468841794f19SSteven Rostedt (VMware) if (!map)
468941794f19SSteven Rostedt (VMware) return -ENOMEM;
469041794f19SSteven Rostedt (VMware)
469141794f19SSteven Rostedt (VMware) map->entry.ip = ip;
469241794f19SSteven Rostedt (VMware) map->data = data;
469341794f19SSteven Rostedt (VMware)
469441794f19SSteven Rostedt (VMware) __add_hash_entry(&mapper->hash, &map->entry);
469541794f19SSteven Rostedt (VMware)
469641794f19SSteven Rostedt (VMware) return 0;
469741794f19SSteven Rostedt (VMware) }
469841794f19SSteven Rostedt (VMware)
469941794f19SSteven Rostedt (VMware) /**
470041794f19SSteven Rostedt (VMware) * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
470141794f19SSteven Rostedt (VMware) * @mapper: The mapper that has the ip maps
470241794f19SSteven Rostedt (VMware) * @ip: The instruction pointer address to remove the data from
470341794f19SSteven Rostedt (VMware) *
470441794f19SSteven Rostedt (VMware) * Returns the data if it is found, otherwise NULL.
470541794f19SSteven Rostedt (VMware) * Note, if the data pointer is used as the data itself, (see
470641794f19SSteven Rostedt (VMware) * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
470741794f19SSteven Rostedt (VMware) * if the data pointer was set to zero.
470841794f19SSteven Rostedt (VMware) */
ftrace_func_mapper_remove_ip(struct ftrace_func_mapper * mapper,unsigned long ip)470941794f19SSteven Rostedt (VMware) void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
471041794f19SSteven Rostedt (VMware) unsigned long ip)
471141794f19SSteven Rostedt (VMware) {
471241794f19SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
471341794f19SSteven Rostedt (VMware) struct ftrace_func_map *map;
471441794f19SSteven Rostedt (VMware) void *data;
471541794f19SSteven Rostedt (VMware)
471641794f19SSteven Rostedt (VMware) entry = ftrace_lookup_ip(&mapper->hash, ip);
471741794f19SSteven Rostedt (VMware) if (!entry)
471841794f19SSteven Rostedt (VMware) return NULL;
471941794f19SSteven Rostedt (VMware)
472041794f19SSteven Rostedt (VMware) map = (struct ftrace_func_map *)entry;
472141794f19SSteven Rostedt (VMware) data = map->data;
472241794f19SSteven Rostedt (VMware)
472341794f19SSteven Rostedt (VMware) remove_hash_entry(&mapper->hash, entry);
472441794f19SSteven Rostedt (VMware) kfree(entry);
472541794f19SSteven Rostedt (VMware)
472641794f19SSteven Rostedt (VMware) return data;
472741794f19SSteven Rostedt (VMware) }
472841794f19SSteven Rostedt (VMware)
472941794f19SSteven Rostedt (VMware) /**
473041794f19SSteven Rostedt (VMware) * free_ftrace_func_mapper - free a mapping of ips and data
473141794f19SSteven Rostedt (VMware) * @mapper: The mapper that has the ip maps
473241794f19SSteven Rostedt (VMware) * @free_func: A function to be called on each data item.
473341794f19SSteven Rostedt (VMware) *
473441794f19SSteven Rostedt (VMware) * This is used to free the function mapper. The @free_func is optional
473541794f19SSteven Rostedt (VMware) * and can be used if the data needs to be freed as well.
473641794f19SSteven Rostedt (VMware) */
free_ftrace_func_mapper(struct ftrace_func_mapper * mapper,ftrace_mapper_func free_func)473741794f19SSteven Rostedt (VMware) void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
473841794f19SSteven Rostedt (VMware) ftrace_mapper_func free_func)
473941794f19SSteven Rostedt (VMware) {
474041794f19SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
474141794f19SSteven Rostedt (VMware) struct ftrace_func_map *map;
474241794f19SSteven Rostedt (VMware) struct hlist_head *hhd;
474304e03d9aSWei Li int size, i;
474404e03d9aSWei Li
474504e03d9aSWei Li if (!mapper)
474604e03d9aSWei Li return;
474741794f19SSteven Rostedt (VMware)
474841794f19SSteven Rostedt (VMware) if (free_func && mapper->hash.count) {
474904e03d9aSWei Li size = 1 << mapper->hash.size_bits;
475041794f19SSteven Rostedt (VMware) for (i = 0; i < size; i++) {
475141794f19SSteven Rostedt (VMware) hhd = &mapper->hash.buckets[i];
475241794f19SSteven Rostedt (VMware) hlist_for_each_entry(entry, hhd, hlist) {
475341794f19SSteven Rostedt (VMware) map = (struct ftrace_func_map *)entry;
475441794f19SSteven Rostedt (VMware) free_func(map);
475541794f19SSteven Rostedt (VMware) }
475641794f19SSteven Rostedt (VMware) }
475741794f19SSteven Rostedt (VMware) }
475841794f19SSteven Rostedt (VMware) free_ftrace_hash(&mapper->hash);
475941794f19SSteven Rostedt (VMware) }
476041794f19SSteven Rostedt (VMware)
/*
 * Drop a reference on @probe under ftrace_lock.  When the last
 * reference goes away, the probe_ops->free() callback is invoked
 * (with ip == 0, meaning "free probe->data itself") and the probe is
 * unlinked and freed.
 */
static void release_probe(struct ftrace_func_probe *probe)
{
	struct ftrace_probe_ops *probe_ops;

	mutex_lock(&ftrace_lock);

	WARN_ON(probe->ref <= 0);

	/* Subtract the ref that was used to protect this instance */
	probe->ref--;

	if (!probe->ref) {
		probe_ops = probe->probe_ops;
		/*
		 * Sending zero as ip tells probe_ops to free
		 * the probe->data itself
		 */
		if (probe_ops->free)
			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
		list_del(&probe->list);
		kfree(probe);
	}
	mutex_unlock(&ftrace_lock);
}
478559df055fSSteven Rostedt
/* Caller must hold ftrace_lock (hence the _locked suffix). */
static void acquire_probe_locked(struct ftrace_func_probe *probe)
{
	/*
	 * Add one ref to keep it from being freed when releasing the
	 * ftrace_lock mutex.
	 */
	probe->ref++;
}
479459df055fSSteven Rostedt
/*
 * register_ftrace_function_probe - attach @probe_ops to functions matching @glob
 * @glob: the glob to match function names against ('!' negation not allowed)
 * @tr: the trace array the probe belongs to
 * @probe_ops: the callbacks to run on each matched function
 * @data: opaque data handed to probe_ops->init() for each new function
 *
 * Reuses an existing ftrace_func_probe for @probe_ops on @tr if one is
 * registered, otherwise allocates one.  Returns the number of functions
 * newly traced on success, or a negative errno (-EINVAL if nothing
 * matched, -ENOMEM on allocation failure).
 */
int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *probe_ops,
			       void *data)
{
	struct ftrace_func_probe *probe = NULL, *iter;
	struct ftrace_func_entry *entry;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int count = 0;
	int size;
	int ret;
	int i;

	if (WARN_ON(!tr))
		return -EINVAL;

	/* We do not support '!' for function probes */
	if (WARN_ON(glob[0] == '!'))
		return -EINVAL;


	mutex_lock(&ftrace_lock);
	/* Check if the probe_ops is already registered */
	list_for_each_entry(iter, &tr->func_probes, list) {
		if (iter->probe_ops == probe_ops) {
			probe = iter;
			break;
		}
	}
	if (!probe) {
		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
		if (!probe) {
			mutex_unlock(&ftrace_lock);
			return -ENOMEM;
		}
		probe->probe_ops = probe_ops;
		probe->ops.func = function_trace_probe_call;
		probe->tr = tr;
		ftrace_ops_init(&probe->ops);
		list_add(&probe->list, &tr->func_probes);
	}

	/* Pin the probe so it survives dropping ftrace_lock below */
	acquire_probe_locked(probe);

	mutex_unlock(&ftrace_lock);

	/*
	 * Note, there's a small window here that the func_hash->filter_hash
	 * may be NULL or empty. Need to be careful when reading the loop.
	 */
	mutex_lock(&probe->ops.func_hash->regex_lock);

	orig_hash = &probe->ops.func_hash->filter_hash;
	old_hash = *orig_hash;
	/* Work on a copy; the live hash is swapped in only on success */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ftrace_match_records(hash, glob, strlen(glob));

	/* Nothing found? */
	if (!ret)
		ret = -EINVAL;

	if (ret < 0)
		goto out;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			/* Skip functions that were already in the old hash */
			if (ftrace_lookup_ip(old_hash, entry->ip))
				continue;
			/*
			 * The caller might want to do something special
			 * for each function we find. We call the callback
			 * to give the caller an opportunity to do so.
			 */
			if (probe_ops->init) {
				ret = probe_ops->init(probe_ops, tr,
						      entry->ip, data,
						      &probe->data);
				if (ret < 0) {
					if (probe_ops->free && count)
						probe_ops->free(probe_ops, tr,
								0, probe->data);
					probe->data = NULL;
					goto out;
				}
			}
			count++;
		}
	}

	mutex_lock(&ftrace_lock);

	if (!count) {
		/* Nothing was added? */
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
					      hash, 1);
	if (ret < 0)
		goto err_unlock;

	/* One ref for each new function traced */
	probe->ref += count;

	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
		ret = ftrace_startup(&probe->ops, 0);

 out_unlock:
	mutex_unlock(&ftrace_lock);

	/* On full success, report how many functions are now probed */
	if (!ret)
		ret = count;
 out:
	mutex_unlock(&probe->ops.func_hash->regex_lock);
	free_ftrace_hash(hash);

	release_probe(probe);

	return ret;

 err_unlock:
	if (!probe_ops->free || !count)
		goto out_unlock;

	/* Failed to do the move, need to call the free functions */
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			if (ftrace_lookup_ip(old_hash, entry->ip))
				continue;
			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
		}
	}
	goto out_unlock;
}
493959df055fSSteven Rostedt
/*
 * unregister_ftrace_function_probe_func - detach @probe_ops from matching functions
 * @glob: glob of functions to remove; NULL, "" or "*" removes them all
 * @tr: the trace array the probe belongs to
 * @probe_ops: the registered callbacks to remove functions from
 *
 * Matched entries are moved from a copy of the filter hash onto a
 * temporary list (hhd); after the hash swap and an RCU grace period,
 * probe_ops->free() is called for each removed ip and the entries are
 * freed.  Returns 0 on success, -ENODEV if @probe_ops is not
 * registered, -EINVAL if nothing matched, or -ENOMEM.
 */
int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *probe_ops)
{
	struct ftrace_func_probe *probe = NULL, *iter;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_func_entry *entry;
	struct ftrace_glob func_g;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash = NULL;
	struct hlist_node *tmp;
	struct hlist_head hhd;
	char str[KSYM_SYMBOL_LEN];
	int count = 0;
	int i, ret = -ENODEV;
	int size;

	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
		func_g.search = NULL;	/* NULL search means "match everything" */
	else {
		int not;

		func_g.type = filter_parse_regex(glob, strlen(glob),
						 &func_g.search, &not);
		func_g.len = strlen(func_g.search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return -EINVAL;
	}

	mutex_lock(&ftrace_lock);
	/* Check if the probe_ops is already registered */
	list_for_each_entry(iter, &tr->func_probes, list) {
		if (iter->probe_ops == probe_ops) {
			probe = iter;
			break;
		}
	}
	if (!probe)
		goto err_unlock_ftrace;

	ret = -EINVAL;
	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
		goto err_unlock_ftrace;

	/* Pin the probe so it survives dropping ftrace_lock below */
	acquire_probe_locked(probe);

	mutex_unlock(&ftrace_lock);

	mutex_lock(&probe->ops.func_hash->regex_lock);

	orig_hash = &probe->ops.func_hash->filter_hash;
	old_hash = *orig_hash;

	if (ftrace_hash_empty(old_hash))
		goto out_unlock;

	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

	ret = -ENOMEM;
	/* Work on a copy; the live hash is swapped in later */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash)
		goto out_unlock;

	INIT_HLIST_HEAD(&hhd);

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {

			if (func_g.search) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, &func_g))
					continue;
			}
			count++;
			/* Park matched entries on hhd until after the RCU sync */
			remove_hash_entry(hash, entry);
			hlist_add_head(&entry->hlist, &hhd);
		}
	}

	/* Nothing found? */
	if (!count) {
		ret = -EINVAL;
		goto out_unlock;
	}

	mutex_lock(&ftrace_lock);

	WARN_ON(probe->ref < count);

	probe->ref -= count;

	if (ftrace_hash_empty(hash))
		ftrace_shutdown(&probe->ops, 0);

	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
					      hash, 1);

	/* still need to update the function call sites */
	if (ftrace_enabled && !ftrace_hash_empty(hash))
		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
				       &old_hash_ops);
	/* Wait for in-flight probe callbacks before freeing their data */
	synchronize_rcu();

	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
		hlist_del(&entry->hlist);
		if (probe_ops->free)
			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
		kfree(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&probe->ops.func_hash->regex_lock);
	free_ftrace_hash(hash);

	release_probe(probe);

	return ret;

 err_unlock_ftrace:
	mutex_unlock(&ftrace_lock);
	return ret;
}
507059df055fSSteven Rostedt
/*
 * Remove every function probe registered on @tr.  Passing NULL as the
 * glob makes unregister_ftrace_function_probe_func() match everything.
 */
void clear_ftrace_function_probes(struct trace_array *tr)
{
	struct ftrace_func_probe *probe, *n;

	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
}
5078a0e6369eSNaveen N. Rao
5079f6180773SSteven Rostedt static LIST_HEAD(ftrace_commands);
5080f6180773SSteven Rostedt static DEFINE_MUTEX(ftrace_cmd_mutex);
5081f6180773SSteven Rostedt
508238de93abSTom Zanussi /*
508338de93abSTom Zanussi * Currently we only register ftrace commands from __init, so mark this
508438de93abSTom Zanussi * __init too.
508538de93abSTom Zanussi */
register_ftrace_command(struct ftrace_func_command * cmd)508638de93abSTom Zanussi __init int register_ftrace_command(struct ftrace_func_command *cmd)
5087f6180773SSteven Rostedt {
5088f6180773SSteven Rostedt struct ftrace_func_command *p;
5089f6180773SSteven Rostedt int ret = 0;
5090f6180773SSteven Rostedt
5091f6180773SSteven Rostedt mutex_lock(&ftrace_cmd_mutex);
5092f6180773SSteven Rostedt list_for_each_entry(p, &ftrace_commands, list) {
5093f6180773SSteven Rostedt if (strcmp(cmd->name, p->name) == 0) {
5094f6180773SSteven Rostedt ret = -EBUSY;
5095f6180773SSteven Rostedt goto out_unlock;
5096f6180773SSteven Rostedt }
5097f6180773SSteven Rostedt }
5098f6180773SSteven Rostedt list_add(&cmd->list, &ftrace_commands);
5099f6180773SSteven Rostedt out_unlock:
5100f6180773SSteven Rostedt mutex_unlock(&ftrace_cmd_mutex);
5101f6180773SSteven Rostedt
5102f6180773SSteven Rostedt return ret;
5103f6180773SSteven Rostedt }
5104f6180773SSteven Rostedt
510538de93abSTom Zanussi /*
510638de93abSTom Zanussi * Currently we only unregister ftrace commands from __init, so mark
510738de93abSTom Zanussi * this __init too.
510838de93abSTom Zanussi */
unregister_ftrace_command(struct ftrace_func_command * cmd)510938de93abSTom Zanussi __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5110f6180773SSteven Rostedt {
5111f6180773SSteven Rostedt struct ftrace_func_command *p, *n;
5112f6180773SSteven Rostedt int ret = -ENODEV;
5113f6180773SSteven Rostedt
5114f6180773SSteven Rostedt mutex_lock(&ftrace_cmd_mutex);
5115f6180773SSteven Rostedt list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5116f6180773SSteven Rostedt if (strcmp(cmd->name, p->name) == 0) {
5117f6180773SSteven Rostedt ret = 0;
5118f6180773SSteven Rostedt list_del_init(&p->list);
5119f6180773SSteven Rostedt goto out_unlock;
5120f6180773SSteven Rostedt }
5121f6180773SSteven Rostedt }
5122f6180773SSteven Rostedt out_unlock:
5123f6180773SSteven Rostedt mutex_unlock(&ftrace_cmd_mutex);
5124f6180773SSteven Rostedt
5125f6180773SSteven Rostedt return ret;
5126f6180773SSteven Rostedt }
5127f6180773SSteven Rostedt
/*
 * Parse one filter expression written to a set_ftrace_* file.
 *
 * @buff has the form "<func>" or "<func>:<command>[:<args>]".  With no
 * command, <func> is matched directly into @hash.  Otherwise the named
 * command is looked up in ftrace_commands and its callback is invoked
 * with the remainder of the string.
 *
 * Returns 0 or a positive match count on success, -EINVAL if nothing
 * matched or the command is unknown, or a negative errno from the
 * command callback.
 */
static int ftrace_process_regex(struct ftrace_iterator *iter,
				char *buff, int len, int enable)
{
	struct ftrace_hash *hash = iter->hash;
	struct trace_array *tr = iter->ops->private;
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	/* No ':' means a plain function match, no command */
	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			/* next holds the command's arguments (may be NULL) */
			ret = p->func(tr, hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
516464e7c440SSteven Rostedt
/*
 * Common write handler for the set_ftrace_filter and set_ftrace_notrace
 * files.  Feeds user input through the trace parser and, once a complete
 * (non-continued) token has been accumulated, applies it to the filter
 * hash (@enable non-zero) or the notrace hash (@enable zero).
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	/*
	 * If the file was also opened for reading it is backed by a
	 * seq_file, and the iterator hangs off its private data.
	 */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Only act once a whole token is buffered (writes may be partial) */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}
52035072c59fSSteven Rostedt
/* Write handler for set_ftrace_filter: enable=1 targets the filter hash. */
ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
521041c52c0dSSteven Rostedt
/* Write handler for set_ftrace_notrace: enable=0 targets the notrace hash. */
ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
521741c52c0dSSteven Rostedt
521833dc9b12SSteven Rostedt static int
__ftrace_match_addr(struct ftrace_hash * hash,unsigned long ip,int remove)52194f554e95SJiri Olsa __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5220647664eaSMasami Hiramatsu {
5221647664eaSMasami Hiramatsu struct ftrace_func_entry *entry;
5222647664eaSMasami Hiramatsu
5223aebfd125SPeter Zijlstra ip = ftrace_location(ip);
5224aebfd125SPeter Zijlstra if (!ip)
5225647664eaSMasami Hiramatsu return -EINVAL;
5226647664eaSMasami Hiramatsu
5227647664eaSMasami Hiramatsu if (remove) {
5228647664eaSMasami Hiramatsu entry = ftrace_lookup_ip(hash, ip);
5229647664eaSMasami Hiramatsu if (!entry)
5230647664eaSMasami Hiramatsu return -ENOENT;
5231647664eaSMasami Hiramatsu free_hash_entry(hash, entry);
5232647664eaSMasami Hiramatsu return 0;
523359bdc12fSSteven Rostedt } else if (__ftrace_lookup_ip(hash, ip) != NULL) {
523459bdc12fSSteven Rostedt /* Already exists */
523559bdc12fSSteven Rostedt return 0;
5236647664eaSMasami Hiramatsu }
5237647664eaSMasami Hiramatsu
5238a12754a8SSteven Rostedt (Google) entry = add_hash_entry(hash, ip);
5239a12754a8SSteven Rostedt (Google) return entry ? 0 : -ENOMEM;
5240647664eaSMasami Hiramatsu }
5241647664eaSMasami Hiramatsu
5242647664eaSMasami Hiramatsu static int
ftrace_match_addr(struct ftrace_hash * hash,unsigned long * ips,unsigned int cnt,int remove)52434f554e95SJiri Olsa ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
52444f554e95SJiri Olsa unsigned int cnt, int remove)
52454f554e95SJiri Olsa {
52464f554e95SJiri Olsa unsigned int i;
52474f554e95SJiri Olsa int err;
52484f554e95SJiri Olsa
52494f554e95SJiri Olsa for (i = 0; i < cnt; i++) {
52504f554e95SJiri Olsa err = __ftrace_match_addr(hash, ips[i], remove);
52514f554e95SJiri Olsa if (err) {
52524f554e95SJiri Olsa /*
52534f554e95SJiri Olsa * This expects the @hash is a temporary hash and if this
52544f554e95SJiri Olsa * fails the caller must free the @hash.
52554f554e95SJiri Olsa */
52564f554e95SJiri Olsa return err;
52574f554e95SJiri Olsa }
52584f554e95SJiri Olsa }
52594f554e95SJiri Olsa return 0;
52604f554e95SJiri Olsa }
52614f554e95SJiri Olsa
/*
 * Update the filter (@enable non-zero) or notrace hash of @ops from a
 * regex string (@buf/@len) and/or an array of addresses (@ips/@cnt).
 * When @reset is set the existing hash contents are discarded first;
 * when @remove is set the addresses are removed instead of added.
 * All work is done on a private copy which is swapped in under
 * ftrace_lock at the end.
 */
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long *ips, unsigned int cnt,
		int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	/* Fresh hash on reset, otherwise a copy of the live one */
	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	/* A pattern that matches no function is an error */
	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ips) {
		ret = ftrace_match_addr(hash, ips, cnt, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
531141c52c0dSSteven Rostedt
/* Address-list front end for ftrace_set_hash() (no regex buffer). */
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
		int remove, int reset, int enable)
{
	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
}
5318647664eaSMasami Hiramatsu
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

/*
 * Bookkeeping for direct-call trampolines.
 * NOTE(review): nothing in this region references struct ftrace_direct_func
 * or ftrace_direct_funcs — they look like leftovers from the older per-ip
 * direct API; confirm before relying on them.
 */
struct ftrace_direct_func {
	struct list_head next;	/* entry on ftrace_direct_funcs */
	unsigned long addr;	/* address of the direct trampoline */
	int count;		/* reference count — presumably number of users; verify */
};

static LIST_HEAD(ftrace_direct_funcs);

static int register_ftrace_function_nolock(struct ftrace_ops *ops);

/*
 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that direct
 * call will be jumped from ftrace_regs_caller. Only if the architecture does
 * not support ftrace_regs_caller but direct_call, use SAVE_ARGS so that it
 * jumps from ftrace_caller for multiple ftrace_ops.
 */
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
#else
#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
#endif
5342f64dd462SJiri Olsa
check_direct_multi(struct ftrace_ops * ops)5343f64dd462SJiri Olsa static int check_direct_multi(struct ftrace_ops *ops)
5344f64dd462SJiri Olsa {
5345f64dd462SJiri Olsa if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5346f64dd462SJiri Olsa return -EINVAL;
5347f64dd462SJiri Olsa if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5348f64dd462SJiri Olsa return -EINVAL;
5349f64dd462SJiri Olsa return 0;
5350f64dd462SJiri Olsa }
5351f64dd462SJiri Olsa
remove_direct_functions_hash(struct ftrace_hash * hash,unsigned long addr)5352f64dd462SJiri Olsa static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5353f64dd462SJiri Olsa {
5354f64dd462SJiri Olsa struct ftrace_func_entry *entry, *del;
5355f64dd462SJiri Olsa int size, i;
5356f64dd462SJiri Olsa
5357f64dd462SJiri Olsa size = 1 << hash->size_bits;
5358f64dd462SJiri Olsa for (i = 0; i < size; i++) {
5359f64dd462SJiri Olsa hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5360f64dd462SJiri Olsa del = __ftrace_lookup_ip(direct_functions, entry->ip);
5361f64dd462SJiri Olsa if (del && del->direct == addr) {
5362f64dd462SJiri Olsa remove_hash_entry(direct_functions, del);
5363f64dd462SJiri Olsa kfree(del);
5364f64dd462SJiri Olsa }
5365f64dd462SJiri Olsa }
5366f64dd462SJiri Olsa }
5367f64dd462SJiri Olsa }
5368f64dd462SJiri Olsa
/**
 * register_ftrace_direct - Call a custom trampoline directly
 * for multiple functions registered in @ops
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the trampoline to call at @ops functions
 *
 * This is used to connect a direct calls to @addr from the nop locations
 * of the functions registered in @ops (with by ftrace_set_filter_ip
 * function).
 *
 * The location that it calls (@addr) must be able to handle a direct call,
 * and save the parameters of the function being traced, and restore them
 * (or inject new ones if needed), before returning.
 *
 * Returns:
 *  0 on success
 *  -EINVAL  - The @ops object was already registered with this call or
 *             when there are no functions in @ops object.
 *  -EBUSY   - Another direct function is already attached (there can be only one)
 *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
 *  -ENOMEM  - There was an allocation failure.
 */
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
	struct ftrace_func_entry *entry, *new;
	int err = -EBUSY, size, i;

	/* Reject an ops that is already set up or live */
	if (ops->func || ops->trampoline)
		return -EINVAL;
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
		return -EINVAL;
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		return -EINVAL;

	hash = ops->func_hash->filter_hash;
	if (ftrace_hash_empty(hash))
		return -EINVAL;

	mutex_lock(&direct_mutex);

	/* Make sure requested entries are not already registered.. */
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			if (ftrace_find_rec_direct(entry->ip))
				goto out_unlock;
		}
	}

	err = -ENOMEM;

	/* Make a copy hash to place the new and the old entries in */
	size = hash->count + direct_functions->count;
	if (size > 32)
		size = 32;
	new_hash = alloc_ftrace_hash(fls(size));
	if (!new_hash)
		goto out_unlock;

	/* Now copy over the existing direct entries */
	size = 1 << direct_functions->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
			new = add_hash_entry(new_hash, entry->ip);
			if (!new)
				goto out_unlock;
			new->direct = entry->direct;
		}
	}

	/* ... and add the new entries */
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			new = add_hash_entry(new_hash, entry->ip);
			if (!new)
				goto out_unlock;
			/* Update both the copy and the hash entry */
			new->direct = addr;
			entry->direct = addr;
		}
	}

	/* Publish the new table; readers are protected by RCU */
	free_hash = direct_functions;
	rcu_assign_pointer(direct_functions, new_hash);
	new_hash = NULL;

	ops->func = call_direct_funcs;
	ops->flags = MULTI_FLAGS;
	ops->trampoline = FTRACE_REGS_ADDR;
	ops->direct_call = addr;

	err = register_ftrace_function_nolock(ops);

 out_unlock:
	mutex_unlock(&direct_mutex);

	/* Wait for in-flight RCU readers before freeing the old table */
	if (free_hash && free_hash != EMPTY_HASH) {
		synchronize_rcu_tasks();
		free_ftrace_hash(free_hash);
	}

	/* Error path: the partially-built copy was never published */
	if (new_hash)
		free_ftrace_hash(new_hash);

	return err;
}
EXPORT_SYMBOL_GPL(register_ftrace_direct);
5478f64dd462SJiri Olsa
/**
 * unregister_ftrace_direct - Remove calls to custom trampoline
 * previously registered by register_ftrace_direct for @ops object.
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the direct call that was registered
 * @free_filters: non zero to also free the filters of @ops
 *
 * This is used to remove a direct calls to @addr from the nop locations
 * of the functions registered in @ops (with by ftrace_set_filter_ip
 * function).
 *
 * Returns:
 *  0 on success
 *  -EINVAL - The @ops object was not properly registered.
 */
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
			     bool free_filters)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;
	int err;

	if (check_direct_multi(ops))
		return -EINVAL;
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EINVAL;

	mutex_lock(&direct_mutex);
	err = unregister_ftrace_function(ops);
	/* Drop the direct_functions entries that pointed at @addr */
	remove_direct_functions_hash(hash, addr);
	mutex_unlock(&direct_mutex);

	/* cleanup for possible another register call */
	ops->func = NULL;
	ops->trampoline = 0;

	if (free_filters)
		ftrace_free_filter(ops);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5517ccf5a89eSJiri Olsa
/*
 * Core of modify_ftrace_direct{,_nolock}(): redirect every function
 * attached to @ops from its current direct trampoline to @addr.
 * A stub ftrace_ops sharing @ops' hash is registered first so the traced
 * locations go through the list func while the entries are rewritten,
 * then unregistered so the updated direct callers take over.
 * Caller must hold direct_mutex.
 */
static int
__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	struct ftrace_hash *hash;
	struct ftrace_func_entry *entry, *iter;
	static struct ftrace_ops tmp_ops = {
		.func = ftrace_stub,
		.flags = FTRACE_OPS_FL_STUB,
	};
	int i, size;
	int err;

	lockdep_assert_held_once(&direct_mutex);

	/* Enable the tmp_ops to have the same functions as the direct ops */
	ftrace_ops_init(&tmp_ops);
	tmp_ops.func_hash = ops->func_hash;
	tmp_ops.direct_call = addr;

	err = register_ftrace_function_nolock(&tmp_ops);
	if (err)
		return err;

	/*
	 * Now the ftrace_ops_list_func() is called to do the direct callers.
	 * We can safely change the direct functions attached to each entry.
	 */
	mutex_lock(&ftrace_lock);

	hash = ops->func_hash->filter_hash;
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
			if (!entry)
				continue;
			entry->direct = addr;
		}
	}
	/* Prevent store tearing if a trampoline concurrently accesses the value */
	WRITE_ONCE(ops->direct_call, addr);

	mutex_unlock(&ftrace_lock);

	/* Removing the tmp_ops will add the updated direct callers to the functions */
	unregister_ftrace_function(&tmp_ops);

	return err;
}
5567f96f644aSSong Liu
5568f96f644aSSong Liu /**
5569da8bdfbdSFlorent Revest * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
5570f96f644aSSong Liu * to call something else
5571f96f644aSSong Liu * @ops: The address of the struct ftrace_ops object
5572f96f644aSSong Liu * @addr: The address of the new trampoline to call at @ops functions
5573f96f644aSSong Liu *
5574f96f644aSSong Liu * This is used to unregister currently registered direct caller and
5575f96f644aSSong Liu * register new one @addr on functions registered in @ops object.
5576f96f644aSSong Liu *
5577f96f644aSSong Liu * Note there's window between ftrace_shutdown and ftrace_startup calls
5578f96f644aSSong Liu * where there will be no callbacks called.
5579f96f644aSSong Liu *
5580f96f644aSSong Liu * Caller should already have direct_mutex locked, so we don't lock
5581f96f644aSSong Liu * direct_mutex here.
5582f96f644aSSong Liu *
5583f96f644aSSong Liu * Returns: zero on success. Non zero on error, which includes:
5584f96f644aSSong Liu * -EINVAL - The @ops object was not properly registered.
5585f96f644aSSong Liu */
modify_ftrace_direct_nolock(struct ftrace_ops * ops,unsigned long addr)5586da8bdfbdSFlorent Revest int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
5587f96f644aSSong Liu {
5588f96f644aSSong Liu if (check_direct_multi(ops))
5589f96f644aSSong Liu return -EINVAL;
5590f96f644aSSong Liu if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5591f96f644aSSong Liu return -EINVAL;
5592f96f644aSSong Liu
5593da8bdfbdSFlorent Revest return __modify_ftrace_direct(ops, addr);
5594f96f644aSSong Liu }
5595da8bdfbdSFlorent Revest EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
5596f96f644aSSong Liu
5597f96f644aSSong Liu /**
5598da8bdfbdSFlorent Revest * modify_ftrace_direct - Modify an existing direct 'multi' call
5599f96f644aSSong Liu * to call something else
5600f96f644aSSong Liu * @ops: The address of the struct ftrace_ops object
5601f96f644aSSong Liu * @addr: The address of the new trampoline to call at @ops functions
5602f96f644aSSong Liu *
5603f96f644aSSong Liu * This is used to unregister currently registered direct caller and
5604f96f644aSSong Liu * register new one @addr on functions registered in @ops object.
5605f96f644aSSong Liu *
5606f96f644aSSong Liu * Note there's window between ftrace_shutdown and ftrace_startup calls
5607f96f644aSSong Liu * where there will be no callbacks called.
5608f96f644aSSong Liu *
5609f96f644aSSong Liu * Returns: zero on success. Non zero on error, which includes:
5610f96f644aSSong Liu * -EINVAL - The @ops object was not properly registered.
5611f96f644aSSong Liu */
modify_ftrace_direct(struct ftrace_ops * ops,unsigned long addr)5612da8bdfbdSFlorent Revest int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5613f96f644aSSong Liu {
5614f96f644aSSong Liu int err;
5615f96f644aSSong Liu
5616f96f644aSSong Liu if (check_direct_multi(ops))
5617f96f644aSSong Liu return -EINVAL;
5618f96f644aSSong Liu if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5619f96f644aSSong Liu return -EINVAL;
5620f96f644aSSong Liu
5621f96f644aSSong Liu mutex_lock(&direct_mutex);
5622da8bdfbdSFlorent Revest err = __modify_ftrace_direct(ops, addr);
5623ccf5a89eSJiri Olsa mutex_unlock(&direct_mutex);
5624ccf5a89eSJiri Olsa return err;
5625ccf5a89eSJiri Olsa }
5626da8bdfbdSFlorent Revest EXPORT_SYMBOL_GPL(modify_ftrace_direct);
5627763e34e7SSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5628763e34e7SSteven Rostedt (VMware)
/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ip is NULL, it fails to update filter.
 *
 * This can allocate memory which must be freed before @ops can be freed,
 * either by removing each filtered addr or by using
 * ftrace_free_filter(@ops).
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
5650647664eaSMasami Hiramatsu
/**
 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
 * @ops: the ops to set the filter with
 * @ips: the array of addresses to add to or remove from the filter.
 * @cnt: the number of addresses in @ips
 * @remove: non zero to remove ips from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ips array or any ip specified within is NULL, it fails to update filter.
 *
 * This can allocate memory which must be freed before @ops can be freed,
 * either by removing each filtered addr or by using
 * ftrace_free_filter(@ops).
 */
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
56734f554e95SJiri Olsa
56744f554e95SJiri Olsa /**
5675d032ae89SJoel Fernandes * ftrace_ops_set_global_filter - setup ops to use global filters
5676d032ae89SJoel Fernandes * @ops - the ops which will use the global filters
5677d032ae89SJoel Fernandes *
5678d032ae89SJoel Fernandes * ftrace users who need global function trace filtering should call this.
5679d032ae89SJoel Fernandes * It can set the global filter only if ops were not initialized before.
5680d032ae89SJoel Fernandes */
ftrace_ops_set_global_filter(struct ftrace_ops * ops)5681d032ae89SJoel Fernandes void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5682d032ae89SJoel Fernandes {
5683d032ae89SJoel Fernandes if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5684d032ae89SJoel Fernandes return;
5685d032ae89SJoel Fernandes
5686d032ae89SJoel Fernandes ftrace_ops_init(ops);
5687d032ae89SJoel Fernandes ops->func_hash = &global_ops.local_hash;
5688d032ae89SJoel Fernandes }
5689d032ae89SJoel Fernandes EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5690d032ae89SJoel Fernandes
/* Regex-string front end for ftrace_set_hash() (no address list). */
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
}
5697647664eaSMasami Hiramatsu
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 *
 * This can allocate memory which must be freed before @ops can be freed,
 * either by removing each filtered addr or by using
 * ftrace_free_filter(@ops).
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
57194eebcc81SSteven Rostedt
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 *
 * This can allocate memory which must be freed before @ops can be freed,
 * either by removing each filtered addr or by using
 * ftrace_free_filter(@ops).
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5756936e074bSSteven Rostedt
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
577277a2b37dSSteven Rostedt
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
/* Raw parameter strings, consumed later by set_ftrace_early_filters() */
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;
5782f1ed7c74SSteven Rostedt (Red Hat)
/* "ftrace_notrace=" boot parameter: stash the list for later processing */
static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
57902af15d6aSSteven Rostedt
/* "ftrace_filter=" boot parameter: stash the list for later processing */
static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
57982af15d6aSSteven Rostedt
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Boot-time buffers for the function-graph filter/notrace lists */
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5803801c29fdSSteven Rostedt
/* "ftrace_graph_filter=" boot parameter: stash the graph filter list */
static int __init set_graph_function(char *str)
{
	strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
5810369bc18fSStefan Assmann
/* "ftrace_graph_notrace=" boot parameter: stash the graph notrace list */
static int __init set_graph_notrace_function(char *str)
{
	strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);
58170d7d9a16SNamhyung Kim
/* "ftrace_graph_max_depth=" boot parameter: applied immediately, no staging */
static int __init set_graph_max_depth_function(char *str)
{
	if (!str)
		return 0;
	/* NOTE(review): simple_strtoul is deprecated; kstrtouint preferred */
	fgraph_max_depth = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
582665a50c65STodd Brandt
set_ftrace_early_graph(char * buf,int enable)58270d7d9a16SNamhyung Kim static void __init set_ftrace_early_graph(char *buf, int enable)
5828369bc18fSStefan Assmann {
5829369bc18fSStefan Assmann int ret;
5830369bc18fSStefan Assmann char *func;
5831b9b0c831SNamhyung Kim struct ftrace_hash *hash;
58320d7d9a16SNamhyung Kim
583392ad18ecSSteven Rostedt (VMware) hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
583424589e3aSSteven Rostedt (VMware) if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
583592ad18ecSSteven Rostedt (VMware) return;
5836369bc18fSStefan Assmann
5837369bc18fSStefan Assmann while (buf) {
5838369bc18fSStefan Assmann func = strsep(&buf, ",");
5839369bc18fSStefan Assmann /* we allow only one expression at a time */
5840b9b0c831SNamhyung Kim ret = ftrace_graph_set_hash(hash, func);
5841369bc18fSStefan Assmann if (ret)
5842369bc18fSStefan Assmann printk(KERN_DEBUG "ftrace: function %s not "
5843369bc18fSStefan Assmann "traceable\n", func);
5844369bc18fSStefan Assmann }
584592ad18ecSSteven Rostedt (VMware)
584692ad18ecSSteven Rostedt (VMware) if (enable)
584792ad18ecSSteven Rostedt (VMware) ftrace_graph_hash = hash;
584892ad18ecSSteven Rostedt (VMware) else
584992ad18ecSSteven Rostedt (VMware) ftrace_graph_notrace_hash = hash;
5850369bc18fSStefan Assmann }
5851369bc18fSStefan Assmann #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5852369bc18fSStefan Assmann
58532a85a37fSSteven Rostedt void __init
ftrace_set_early_filter(struct ftrace_ops * ops,char * buf,int enable)58542a85a37fSSteven Rostedt ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
58552af15d6aSSteven Rostedt {
58562af15d6aSSteven Rostedt char *func;
58572af15d6aSSteven Rostedt
5858f04f24fbSMasami Hiramatsu ftrace_ops_init(ops);
5859f04f24fbSMasami Hiramatsu
58602af15d6aSSteven Rostedt while (buf) {
58612af15d6aSSteven Rostedt func = strsep(&buf, ",");
5862f45948e8SSteven Rostedt ftrace_set_regex(ops, func, strlen(func), 0, enable);
58632af15d6aSSteven Rostedt }
58642af15d6aSSteven Rostedt }
58652af15d6aSSteven Rostedt
/* Apply every filter list collected from the boot command line */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
58792af15d6aSSteven Rostedt
/*
 * Release for the set_ftrace_filter / set_ftrace_notrace files: flush
 * any pending parser input, then (for writers) commit the staged hash
 * back into the ops under regex_lock + ftrace_lock.
 */
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* Process a final command that was not newline-terminated */
		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);

		ftrace_process_regex(iter, parser->buffer,
				     parser->idx, enable);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash) {
			orig_hash = &iter->ops->func_hash->filter_hash;
			if (iter->tr) {
				/* Keep the MOD flag in sync with mod_trace */
				if (list_empty(&iter->tr->mod_trace))
					iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
				else
					iter->hash->flags |= FTRACE_HASH_FL_MOD;
			}
		} else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
						iter->hash, filter_hash);
		mutex_unlock(&ftrace_lock);
	} else {
		/* For read only, the hash is the ops hash */
		iter->hash = NULL;
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	if (iter->tr)
		trace_array_put(iter->tr);
	kfree(iter);

	return 0;
}
59375072c59fSSteven Rostedt
/* Read-only listing files (available/enabled/touched/addrs functions) */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_touched_fops = {
	.open = ftrace_touched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_avail_addrs_fops = {
	.open = ftrace_avail_addrs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* Writable filter files: release commits input via ftrace_regex_release() */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
598141c52c0dSSteven Rostedt
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Serializes updates and iteration of the two graph hashes below */
static DEFINE_MUTEX(graph_lock);

struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;

enum graph_filter_type {
	GRAPH_FILTER_NOTRACE	= 0,
	GRAPH_FILTER_FUNCTION,
};

/* Sentinel returned by g_start() when the hash has no entries */
#define FTRACE_GRAPH_EMPTY	((void *)1)

/* Per-open state for the set_graph_function / set_graph_notrace files */
struct ftrace_graph_data {
	struct ftrace_hash		*hash;		/* hash being read; re-fetched in g_start() */
	struct ftrace_func_entry	*entry;		/* current entry of the walk */
	int				idx;		/* for hash table iteration */
	enum graph_filter_type		type;
	struct ftrace_hash		*new_hash;	/* writer's staged copy, committed on release */
	const struct seq_operations	*seq_ops;
	struct trace_parser		parser;		/* accumulates partial writes */
};
6005faf982a6SNamhyung Kim
/*
 * Advance the hash walk: finish the current bucket's chain first, then
 * scan the remaining buckets. Returns the next entry or NULL when done.
 */
static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;
	struct ftrace_func_entry *entry = fgd->entry;
	struct hlist_head *head;
	int i, idx = fgd->idx;

	if (*pos >= fgd->hash->count)
		return NULL;

	if (entry) {
		hlist_for_each_entry_continue(entry, hlist) {
			fgd->entry = entry;
			return entry;
		}

		/* Current bucket exhausted; resume at the next one */
		idx++;
	}

	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
		head = &fgd->hash->buckets[i];
		hlist_for_each_entry(entry, head, hlist) {
			fgd->entry = entry;
			fgd->idx = i;
			return entry;
		}
	}
	return NULL;
}

/* seq_file ->next(): bump position and continue the walk */
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}
6043ea4e2bc4SSteven Rostedt
/* seq_file ->start(): takes graph_lock, released in g_stop() */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* fgd->hash was cleared on open; re-fetch it under graph_lock */
	if (fgd->type == GRAPH_FILTER_FUNCTION)
		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
	else
		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));

	/* Nothing, tell g_show to print all functions are enabled */
	if (ftrace_hash_empty(fgd->hash) && !*pos)
		return FTRACE_GRAPH_EMPTY;

	fgd->idx = 0;
	fgd->entry = NULL;
	return __g_next(m, pos);
}
6065ea4e2bc4SSteven Rostedt
/* seq_file ->stop(): pairs with the graph_lock taken in g_start() */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
6070ea4e2bc4SSteven Rostedt
g_show(struct seq_file * m,void * v)6071ea4e2bc4SSteven Rostedt static int g_show(struct seq_file *m, void *v)
6072ea4e2bc4SSteven Rostedt {
6073b9b0c831SNamhyung Kim struct ftrace_func_entry *entry = v;
6074ea4e2bc4SSteven Rostedt
6075b9b0c831SNamhyung Kim if (!entry)
6076ea4e2bc4SSteven Rostedt return 0;
6077ea4e2bc4SSteven Rostedt
6078555fc781SSteven Rostedt (VMware) if (entry == FTRACE_GRAPH_EMPTY) {
6079280d1429SNamhyung Kim struct ftrace_graph_data *fgd = m->private;
6080280d1429SNamhyung Kim
6081b9b0c831SNamhyung Kim if (fgd->type == GRAPH_FILTER_FUNCTION)
6082fa6f0cc7SRasmus Villemoes seq_puts(m, "#### all functions enabled ####\n");
6083280d1429SNamhyung Kim else
6084fa6f0cc7SRasmus Villemoes seq_puts(m, "#### no functions disabled ####\n");
6085f9349a8fSFrederic Weisbecker return 0;
6086f9349a8fSFrederic Weisbecker }
6087f9349a8fSFrederic Weisbecker
6088b9b0c831SNamhyung Kim seq_printf(m, "%ps\n", (void *)entry->ip);
6089ea4e2bc4SSteven Rostedt
6090ea4e2bc4SSteven Rostedt return 0;
6091ea4e2bc4SSteven Rostedt }
6092ea4e2bc4SSteven Rostedt
/* seq_file operations for listing a graph filter hash */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
6099ea4e2bc4SSteven Rostedt
/*
 * Common open path for the graph filter files. Readers get a seq_file
 * over the current hash; writers additionally get a parser and a staged
 * hash copy (fgd->new_hash) that is committed in ftrace_graph_release().
 * Called with graph_lock held by the callers.
 */
static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret;
	struct ftrace_hash *new_hash = NULL;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
			return -ENOMEM;

		/* O_TRUNC starts from an empty hash, else copy the old one */
		if (file->f_flags & O_TRUNC)
			new_hash = alloc_ftrace_hash(size_bits);
		else
			new_hash = alloc_and_copy_ftrace_hash(size_bits,
							      fgd->hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		} else {
			/* Failed */
			free_ftrace_hash(new_hash);
			new_hash = NULL;
		}
	} else
		file->private_data = fgd;

out:
	/* On any failure, also undo the parser allocation above */
	if (ret < 0 && file->f_mode & FMODE_WRITE)
		trace_parser_put(&fgd->parser);

	fgd->new_hash = new_hash;

	/*
	 * All uses of fgd->hash must be taken with the graph_lock
	 * held. The graph_lock is going to be released, so force
	 * fgd->hash to be reinitialized when it is taken again.
	 */
	fgd->hash = NULL;

	return ret;
}
6156ea4e2bc4SSteven Rostedt
/* Open handler for the set_graph_function file */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	mutex_lock(&graph_lock);

	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
	fgd->type = GRAPH_FILTER_FUNCTION;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
}
6184faf982a6SNamhyung Kim
/* Open handler for the set_graph_notrace file (mirror of the above) */
static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	mutex_lock(&graph_lock);

	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
	fgd->type = GRAPH_FILTER_NOTRACE;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
}
621229ad23b0SNamhyung Kim
/*
 * Release for the graph filter files: for writers, flush any pending
 * parser input into the staged hash and publish it over the old hash
 * under RCU, then synchronize before freeing the old one.
 */
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
	struct ftrace_hash *old_hash, *new_hash;
	struct trace_parser *parser;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		fgd = m->private;
		seq_release(inode, file);
	} else {
		fgd = file->private_data;
	}


	if (file->f_mode & FMODE_WRITE) {

		parser = &fgd->parser;

		/* Apply a final command that was not newline-terminated */
		if (trace_parser_loaded((parser))) {
			ret = ftrace_graph_set_hash(fgd->new_hash,
						    parser->buffer);
		}

		trace_parser_put(parser);

		new_hash = __ftrace_hash_move(fgd->new_hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}

		mutex_lock(&graph_lock);

		if (fgd->type == GRAPH_FILTER_FUNCTION) {
			old_hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_hash, new_hash);
		} else {
			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
		}

		mutex_unlock(&graph_lock);

		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		if (old_hash != EMPTY_HASH)
			synchronize_rcu_tasks_rude();

		free_ftrace_hash(old_hash);
	}

 out:
	free_ftrace_hash(fgd->new_hash);
	kfree(fgd);

	return ret;
}
628287827111SLi Zefan
/*
 * Add every function matching @buffer to @hash, or — for a negated
 * pattern (per filter_parse_regex()'s "not" result) — remove matches
 * from it. Returns -EINVAL if nothing matched, -ENODEV if ftrace is
 * disabled, 0 on success.
 */
static int
ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
{
	struct ftrace_glob func_g;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	struct ftrace_func_entry *entry;
	int fail = 1;
	int not;

	/* decode regex */
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);

	func_g.len = strlen(func_g.search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
			entry = ftrace_lookup_ip(hash, rec->ip);

			if (!not) {
				fail = 0;

				/* Already present: nothing to add */
				if (entry)
					continue;
				if (add_hash_entry(hash, rec->ip) == NULL)
					goto out;
			} else {
				if (entry) {
					free_hash_entry(hash, entry);
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}
6337ea4e2bc4SSteven Rostedt
6338ea4e2bc4SSteven Rostedt static ssize_t
ftrace_graph_write(struct file * file,const char __user * ubuf,size_t cnt,loff_t * ppos)6339ea4e2bc4SSteven Rostedt ftrace_graph_write(struct file *file, const char __user *ubuf,
6340ea4e2bc4SSteven Rostedt size_t cnt, loff_t *ppos)
6341ea4e2bc4SSteven Rostedt {
63426a10108bSNamhyung Kim ssize_t read, ret = 0;
6343faf982a6SNamhyung Kim struct ftrace_graph_data *fgd = file->private_data;
6344e704eff3SSteven Rostedt (VMware) struct trace_parser *parser;
6345ea4e2bc4SSteven Rostedt
6346c7c6b1feSLi Zefan if (!cnt)
6347ea4e2bc4SSteven Rostedt return 0;
6348ea4e2bc4SSteven Rostedt
6349ae98d27aSSteven Rostedt (VMware) /* Read mode uses seq functions */
6350ae98d27aSSteven Rostedt (VMware) if (file->f_mode & FMODE_READ) {
6351ae98d27aSSteven Rostedt (VMware) struct seq_file *m = file->private_data;
6352ae98d27aSSteven Rostedt (VMware) fgd = m->private;
6353ae98d27aSSteven Rostedt (VMware) }
6354ae98d27aSSteven Rostedt (VMware)
6355e704eff3SSteven Rostedt (VMware) parser = &fgd->parser;
6356689fd8b6Sjolsa@redhat.com
6357e704eff3SSteven Rostedt (VMware) read = trace_get_user(parser, ubuf, cnt, ppos);
6358ea4e2bc4SSteven Rostedt
6359e704eff3SSteven Rostedt (VMware) if (read >= 0 && trace_parser_loaded(parser) &&
6360e704eff3SSteven Rostedt (VMware) !trace_parser_cont(parser)) {
63616a10108bSNamhyung Kim
6362b9b0c831SNamhyung Kim ret = ftrace_graph_set_hash(fgd->new_hash,
6363e704eff3SSteven Rostedt (VMware) parser->buffer);
6364e704eff3SSteven Rostedt (VMware) trace_parser_clear(parser);
6365689fd8b6Sjolsa@redhat.com }
6366ea4e2bc4SSteven Rostedt
63676a10108bSNamhyung Kim if (!ret)
6368ea4e2bc4SSteven Rostedt ret = read;
63691eb90f13SLi Zefan
6370ea4e2bc4SSteven Rostedt return ret;
6371ea4e2bc4SSteven Rostedt }
6372ea4e2bc4SSteven Rostedt
/* File operations for "set_graph_function": seq-file reads, parser-based writes. */
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
638029ad23b0SNamhyung Kim
/* File operations for "set_graph_notrace"; shares the write/release path above. */
static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
6388ea4e2bc4SSteven Rostedt #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6389ea4e2bc4SSteven Rostedt
/*
 * Create the "set_ftrace_filter" and "set_ftrace_notrace" control files
 * for @ops under @parent.  Writing to them updates the ops' filter and
 * notrace hashes respectively.
 */
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
			  ops, &ftrace_notrace_fops);
}
6400591dffdaSSteven Rostedt (Red Hat)
6401591dffdaSSteven Rostedt (Red Hat) /*
6402591dffdaSSteven Rostedt (Red Hat) * The name "destroy_filter_files" is really a misnomer. Although
64039efb85c5SHariprasad Kelam * in the future, it may actually delete the files, but this is
6404591dffdaSSteven Rostedt (Red Hat) * really intended to make sure the ops passed in are disabled
6405591dffdaSSteven Rostedt (Red Hat) * and that when this function returns, the caller is free to
6406591dffdaSSteven Rostedt (Red Hat) * free the ops.
6407591dffdaSSteven Rostedt (Red Hat) *
6408591dffdaSSteven Rostedt (Red Hat) * The "destroy" name is only to match the "create" name that this
6409591dffdaSSteven Rostedt (Red Hat) * should be paired with.
6410591dffdaSSteven Rostedt (Red Hat) */
/*
 * Disable @ops and release its filter hashes so the caller may free it.
 * Despite the name, the tracefs files themselves are not removed (see
 * the comment above).
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	/* Make sure the ops is fully shut down before it can be freed. */
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	ftrace_free_filter(ops);
	mutex_unlock(&ftrace_lock);
}
6420591dffdaSSteven Rostedt (Red Hat)
/*
 * Create the dynamic-ftrace control files under @d_tracer at boot:
 * the read-only listing files, the global filter/notrace files, and
 * (when configured) the function-graph filter files.
 */
static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_avail_addrs_fops);

	trace_create_file("enabled_functions", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_enabled_fops);

	trace_create_file("touched_functions", TRACE_MODE_READ,
			d_tracer, NULL, &ftrace_touched_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
				    NULL,
				    &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
64495072c59fSSteven Rostedt
/*
 * sort()/bsearch() comparator for arrays of mcount addresses:
 * orders the two unsigned longs numerically.
 */
static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	/* 1 when *ipa > *ipb, -1 when smaller, 0 when equal. */
	return (*ipa > *ipb) - (*ipa < *ipb);
}
64619fd49328SSteven Rostedt
#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
/*
 * Boot-time sanity check: verify that the build-time-sorted mcount
 * section at @start really is in ascending order.  Warns at the first
 * out-of-order pair and stops; reports success otherwise.
 */
static void test_is_sorted(unsigned long *start, unsigned long count)
{
	int i;

	for (i = 1; i < count; i++) {
		if (WARN(start[i - 1] > start[i],
			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
			 (void *)start[i - 1], start[i - 1],
			 (void *)start[i], start[i]))
			break;
	}
	if (i == count)
		pr_info("ftrace section at %px sorted properly\n", start);
}
#else
/* Check compiled out: the build-time sort is trusted. */
static void test_is_sorted(unsigned long *start, unsigned long count)
{
}
#endif
64828147dc78SSteven Rostedt (VMware)
/*
 * ftrace_process_locs - turn an mcount_loc table into ftrace records
 * @mod:   the module owning the table, or NULL for the core kernel
 * @start: first entry of the mcount_loc section
 * @end:   one past the last entry
 *
 * Sorts the call-site addresses (unless vmlinux was sorted at build
 * time), allocates ftrace pages for them, links the pages onto the
 * global list and converts the sites via ftrace_update_code().
 * NULL entries (linker padding between object files) are skipped, and
 * any pages left over because of skipping are freed at the end.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *pg_unuse = NULL;
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long skipped = 0;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	/*
	 * Sorting mcount in vmlinux at build time depend on
	 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
	 * modules can not be sorted at build time.
	 */
	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
		sort(start, count, sizeof(*start),
		     ftrace_cmp_ips, NULL);
	} else {
		test_is_sorted(start, count);
	}

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		/* Append the module's pages after the current tail. */
		ftrace_pages->next = start_pg;
	}

	/* Fill the pages with one record per (adjusted) call site. */
	p = start;
	pg = start_pg;
	while (p < end) {
		unsigned long end_offset;
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr) {
			skipped++;
			continue;
		}

		/* Move to the next page when this one is full. */
		end_offset = (pg->index+1) * sizeof(pg->records[0]);
		if (end_offset > PAGE_SIZE << pg->order) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* Detach any fully unused trailing pages for later freeing. */
	if (pg->next) {
		pg_unuse = pg->next;
		pg->next = NULL;
	}

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	/* We should have used all pages unless we skipped some */
	if (pg_unuse) {
		WARN_ON(!skipped);
		/* Need to synchronize with ftrace_location_range() */
		synchronize_rcu();
		ftrace_free_pages(pg_unuse);
	}
	return ret;
}
660568bf21aaSSteven Rostedt
/* One saved function of a module (name is heap-allocated; freed in
 * ftrace_free_mod_map()). */
struct ftrace_mod_func {
	struct list_head	list;	/* link in ftrace_mod_map::funcs */
	char			*name;	/* symbol name */
	unsigned long		ip;	/* function entry address */
	unsigned int		size;	/* symbol size */
};

/*
 * Per-module map of saved function information.  Looked up on the
 * RCU-protected ftrace_mod_maps list; removed and freed after a grace
 * period by ftrace_release_mod() / ftrace_free_mod_map().
 */
struct ftrace_mod_map {
	struct rcu_head		rcu;		/* deferred free */
	struct list_head	list;		/* link in ftrace_mod_maps */
	struct module		*mod;
	unsigned long		start_addr;	/* module text range */
	unsigned long		end_addr;
	struct list_head	funcs;		/* list of ftrace_mod_func */
	unsigned int		num_funcs;
};
6622aba4b5c2SSteven Rostedt (VMware)
ftrace_get_trampoline_kallsym(unsigned int symnum,unsigned long * value,char * type,char * name,char * module_name,int * exported)6623fc0ea795SAdrian Hunter static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6624fc0ea795SAdrian Hunter unsigned long *value, char *type,
6625fc0ea795SAdrian Hunter char *name, char *module_name,
6626fc0ea795SAdrian Hunter int *exported)
6627fc0ea795SAdrian Hunter {
6628fc0ea795SAdrian Hunter struct ftrace_ops *op;
6629fc0ea795SAdrian Hunter
6630fc0ea795SAdrian Hunter list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6631fc0ea795SAdrian Hunter if (!op->trampoline || symnum--)
6632fc0ea795SAdrian Hunter continue;
6633fc0ea795SAdrian Hunter *value = op->trampoline;
6634fc0ea795SAdrian Hunter *type = 't';
6635d0c2d66fSAzeem Shaikh strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6636d0c2d66fSAzeem Shaikh strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6637fc0ea795SAdrian Hunter *exported = 0;
6638fc0ea795SAdrian Hunter return 0;
6639fc0ea795SAdrian Hunter }
6640fc0ea795SAdrian Hunter
6641fc0ea795SAdrian Hunter return -ERANGE;
6642fc0ea795SAdrian Hunter }
6643fc0ea795SAdrian Hunter
#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
/*
 * Does @ops currently reference the function at @ip?
 *
 * It does when it is enabled and either traces every function, or @ip
 * passes a non-empty filter hash and is absent from the notrace hash.
 */
static bool
ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter;

	/* A disabled ops references nothing. */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* An ops tracing all functions covers @ip as well. */
	if (ops_traces_mod(ops))
		return true;

	/* A non-empty filter hash must contain @ip. */
	filter = ops->func_hash->filter_hash;
	if (!ftrace_hash_empty(filter) && !__ftrace_lookup_ip(filter, ip))
		return false;

	/* Finally, @ip must not be excluded by the notrace hash. */
	return !ftrace_lookup_ip(ops->func_hash->notrace_hash, ip);
}
#endif
6675123d6455SWang Jingjin
667693eb677dSSteven Rostedt #ifdef CONFIG_MODULES
667732082309SSteven Rostedt
667832082309SSteven Rostedt #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
667932082309SSteven Rostedt
66806aa69784SSteven Rostedt (VMware) static LIST_HEAD(ftrace_mod_maps);
66816aa69784SSteven Rostedt (VMware)
referenced_filters(struct dyn_ftrace * rec)6682b7ffffbbSSteven Rostedt (Red Hat) static int referenced_filters(struct dyn_ftrace *rec)
6683b7ffffbbSSteven Rostedt (Red Hat) {
6684b7ffffbbSSteven Rostedt (Red Hat) struct ftrace_ops *ops;
6685b7ffffbbSSteven Rostedt (Red Hat) int cnt = 0;
6686b7ffffbbSSteven Rostedt (Red Hat)
6687b7ffffbbSSteven Rostedt (Red Hat) for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6688123d6455SWang Jingjin if (ops_references_ip(ops, rec->ip)) {
6689c5f51572SChengming Zhou if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6690c5f51572SChengming Zhou continue;
6691c5f51572SChengming Zhou if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6692c5f51572SChengming Zhou continue;
6693b7ffffbbSSteven Rostedt (Red Hat) cnt++;
66948a224ffbSChengming Zhou if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
66958a224ffbSChengming Zhou rec->flags |= FTRACE_FL_REGS;
6696c5f51572SChengming Zhou if (cnt == 1 && ops->trampoline)
6697c5f51572SChengming Zhou rec->flags |= FTRACE_FL_TRAMP;
6698c5f51572SChengming Zhou else
6699c5f51572SChengming Zhou rec->flags &= ~FTRACE_FL_TRAMP;
67008a224ffbSChengming Zhou }
6701b7ffffbbSSteven Rostedt (Red Hat) }
6702b7ffffbbSSteven Rostedt (Red Hat)
6703b7ffffbbSSteven Rostedt (Red Hat) return cnt;
6704b7ffffbbSSteven Rostedt (Red Hat) }
6705b7ffffbbSSteven Rostedt (Red Hat)
67062a5bfe47SSteven Rostedt (VMware) static void
clear_mod_from_hash(struct ftrace_page * pg,struct ftrace_hash * hash)67072a5bfe47SSteven Rostedt (VMware) clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
67082a5bfe47SSteven Rostedt (VMware) {
67092a5bfe47SSteven Rostedt (VMware) struct ftrace_func_entry *entry;
67102a5bfe47SSteven Rostedt (VMware) struct dyn_ftrace *rec;
67112a5bfe47SSteven Rostedt (VMware) int i;
67122a5bfe47SSteven Rostedt (VMware)
67132a5bfe47SSteven Rostedt (VMware) if (ftrace_hash_empty(hash))
67142a5bfe47SSteven Rostedt (VMware) return;
67152a5bfe47SSteven Rostedt (VMware)
67162a5bfe47SSteven Rostedt (VMware) for (i = 0; i < pg->index; i++) {
67172a5bfe47SSteven Rostedt (VMware) rec = &pg->records[i];
67182a5bfe47SSteven Rostedt (VMware) entry = __ftrace_lookup_ip(hash, rec->ip);
67192a5bfe47SSteven Rostedt (VMware) /*
67202a5bfe47SSteven Rostedt (VMware) * Do not allow this rec to match again.
67212a5bfe47SSteven Rostedt (VMware) * Yeah, it may waste some memory, but will be removed
67222a5bfe47SSteven Rostedt (VMware) * if/when the hash is modified again.
67232a5bfe47SSteven Rostedt (VMware) */
67242a5bfe47SSteven Rostedt (VMware) if (entry)
67252a5bfe47SSteven Rostedt (VMware) entry->ip = 0;
67262a5bfe47SSteven Rostedt (VMware) }
67272a5bfe47SSteven Rostedt (VMware) }
67282a5bfe47SSteven Rostedt (VMware)
/*
 * Clear any records belonging to @pg from the filter and notrace hashes
 * of every trace instance.  Runs outside ftrace_lock: takes
 * trace_types_lock and each instance's regex_lock.
 */
static void clear_mod_from_hashes(struct ftrace_page *pg)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->ops || !tr->ops->func_hash)
			continue;
		mutex_lock(&tr->ops->func_hash->regex_lock);
		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
		mutex_unlock(&tr->ops->func_hash->regex_lock);
	}
	mutex_unlock(&trace_types_lock);
}
67452a5bfe47SSteven Rostedt (VMware)
ftrace_free_mod_map(struct rcu_head * rcu)67466aa69784SSteven Rostedt (VMware) static void ftrace_free_mod_map(struct rcu_head *rcu)
67476aa69784SSteven Rostedt (VMware) {
67486aa69784SSteven Rostedt (VMware) struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
67496aa69784SSteven Rostedt (VMware) struct ftrace_mod_func *mod_func;
67506aa69784SSteven Rostedt (VMware) struct ftrace_mod_func *n;
67516aa69784SSteven Rostedt (VMware)
67526aa69784SSteven Rostedt (VMware) /* All the contents of mod_map are now not visible to readers */
67536aa69784SSteven Rostedt (VMware) list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
67546aa69784SSteven Rostedt (VMware) kfree(mod_func->name);
67556aa69784SSteven Rostedt (VMware) list_del(&mod_func->list);
67566aa69784SSteven Rostedt (VMware) kfree(mod_func);
67576aa69784SSteven Rostedt (VMware) }
67586aa69784SSteven Rostedt (VMware)
67596aa69784SSteven Rostedt (VMware) kfree(mod_map);
67606aa69784SSteven Rostedt (VMware) }
67616aa69784SSteven Rostedt (VMware)
/*
 * ftrace_release_mod - drop all ftrace state for a module being removed
 * @mod: the module going away
 *
 * Unlinks @mod's saved mod_map (freed after an RCU grace period) and
 * removes every ftrace_page whose records lie within the module.  The
 * pages are collected under ftrace_lock but freed afterwards, once
 * their entries have been cleared from all instance hashes and after
 * synchronizing with lockless readers such as ftrace_location_range().
 */
void ftrace_release_mod(struct module *mod)
{
	struct ftrace_mod_map *mod_map;
	struct ftrace_mod_map *n;
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *tmp_page = NULL;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/* Detach the module's mod_map; free it after a grace period. */
	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
		if (mod_map->mod == mod) {
			list_del_rcu(&mod_map->list);
			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
			break;
		}
	}

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		/* A page belongs entirely to one module; test its first rec. */
		rec = &pg->records[0];
		if (within_module(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			ftrace_update_tot_cnt -= pg->index;
			*last_pg = pg->next;

			/* Collect the page for freeing after the lock drops. */
			pg->next = tmp_page;
			tmp_page = pg;
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);

	/* Need to synchronize with ftrace_location_range() */
	if (tmp_page)
		synchronize_rcu();
	for (pg = tmp_page; pg; pg = tmp_page) {

		/* Needs to be called outside of ftrace_lock */
		clear_mod_from_hashes(pg);

		if (pg->records) {
			free_pages((unsigned long)pg->records, pg->order);
			ftrace_number_of_pages -= 1 << pg->order;
		}
		tmp_page = pg->next;
		kfree(pg);
		ftrace_number_of_groups--;
	}
}
683193eb677dSSteven Rostedt
ftrace_module_enable(struct module * mod)68327dcd182bSJessica Yu void ftrace_module_enable(struct module *mod)
6833b7ffffbbSSteven Rostedt (Red Hat) {
6834b7ffffbbSSteven Rostedt (Red Hat) struct dyn_ftrace *rec;
6835b7ffffbbSSteven Rostedt (Red Hat) struct ftrace_page *pg;
6836b7ffffbbSSteven Rostedt (Red Hat)
6837b7ffffbbSSteven Rostedt (Red Hat) mutex_lock(&ftrace_lock);
6838b7ffffbbSSteven Rostedt (Red Hat)
6839b7ffffbbSSteven Rostedt (Red Hat) if (ftrace_disabled)
6840b7ffffbbSSteven Rostedt (Red Hat) goto out_unlock;
6841b7ffffbbSSteven Rostedt (Red Hat)
6842b7ffffbbSSteven Rostedt (Red Hat) /*
6843b7ffffbbSSteven Rostedt (Red Hat) * If the tracing is enabled, go ahead and enable the record.
6844b7ffffbbSSteven Rostedt (Red Hat) *
68459efb85c5SHariprasad Kelam * The reason not to enable the record immediately is the
6846b7ffffbbSSteven Rostedt (Red Hat) * inherent check of ftrace_make_nop/ftrace_make_call for
6847b7ffffbbSSteven Rostedt (Red Hat) * correct previous instructions. Making first the NOP
6848b7ffffbbSSteven Rostedt (Red Hat) * conversion puts the module to the correct state, thus
6849b7ffffbbSSteven Rostedt (Red Hat) * passing the ftrace_make_call check.
6850b7ffffbbSSteven Rostedt (Red Hat) *
6851b7ffffbbSSteven Rostedt (Red Hat) * We also delay this to after the module code already set the
6852b7ffffbbSSteven Rostedt (Red Hat) * text to read-only, as we now need to set it back to read-write
6853b7ffffbbSSteven Rostedt (Red Hat) * so that we can modify the text.
6854b7ffffbbSSteven Rostedt (Red Hat) */
6855b7ffffbbSSteven Rostedt (Red Hat) if (ftrace_start_up)
6856b7ffffbbSSteven Rostedt (Red Hat) ftrace_arch_code_modify_prepare();
6857b7ffffbbSSteven Rostedt (Red Hat)
6858b7ffffbbSSteven Rostedt (Red Hat) do_for_each_ftrace_rec(pg, rec) {
6859b7ffffbbSSteven Rostedt (Red Hat) int cnt;
6860b7ffffbbSSteven Rostedt (Red Hat) /*
6861b7ffffbbSSteven Rostedt (Red Hat) * do_for_each_ftrace_rec() is a double loop.
6862b7ffffbbSSteven Rostedt (Red Hat) * module text shares the pg. If a record is
6863b7ffffbbSSteven Rostedt (Red Hat) * not part of this module, then skip this pg,
6864b7ffffbbSSteven Rostedt (Red Hat) * which the "break" will do.
6865b7ffffbbSSteven Rostedt (Red Hat) */
686613511489SLevi Yun if (!within_module(rec->ip, mod))
6867b7ffffbbSSteven Rostedt (Red Hat) break;
6868b7ffffbbSSteven Rostedt (Red Hat)
6869b39181f7SSteven Rostedt (Google) /* Weak functions should still be ignored */
6870b39181f7SSteven Rostedt (Google) if (!test_for_valid_rec(rec)) {
6871b39181f7SSteven Rostedt (Google) /* Clear all other flags. Should not be enabled anyway */
6872b39181f7SSteven Rostedt (Google) rec->flags = FTRACE_FL_DISABLED;
6873b39181f7SSteven Rostedt (Google) continue;
6874b39181f7SSteven Rostedt (Google) }
6875b39181f7SSteven Rostedt (Google)
6876b7ffffbbSSteven Rostedt (Red Hat) cnt = 0;
6877b7ffffbbSSteven Rostedt (Red Hat)
6878b7ffffbbSSteven Rostedt (Red Hat) /*
6879b7ffffbbSSteven Rostedt (Red Hat) * When adding a module, we need to check if tracers are
6880b7ffffbbSSteven Rostedt (Red Hat) * currently enabled and if they are, and can trace this record,
6881b7ffffbbSSteven Rostedt (Red Hat) * we need to enable the module functions as well as update the
6882b7ffffbbSSteven Rostedt (Red Hat) * reference counts for those function records.
6883b7ffffbbSSteven Rostedt (Red Hat) */
6884b7ffffbbSSteven Rostedt (Red Hat) if (ftrace_start_up)
6885b7ffffbbSSteven Rostedt (Red Hat) cnt += referenced_filters(rec);
6886b7ffffbbSSteven Rostedt (Red Hat)
68878a224ffbSChengming Zhou rec->flags &= ~FTRACE_FL_DISABLED;
68888a224ffbSChengming Zhou rec->flags += cnt;
6889b7ffffbbSSteven Rostedt (Red Hat)
6890b7ffffbbSSteven Rostedt (Red Hat) if (ftrace_start_up && cnt) {
6891b7ffffbbSSteven Rostedt (Red Hat) int failed = __ftrace_replace_code(rec, 1);
6892b7ffffbbSSteven Rostedt (Red Hat) if (failed) {
6893b7ffffbbSSteven Rostedt (Red Hat) ftrace_bug(failed, rec);
6894b7ffffbbSSteven Rostedt (Red Hat) goto out_loop;
6895b7ffffbbSSteven Rostedt (Red Hat) }
6896b7ffffbbSSteven Rostedt (Red Hat) }
6897b7ffffbbSSteven Rostedt (Red Hat)
6898b7ffffbbSSteven Rostedt (Red Hat) } while_for_each_ftrace_rec();
6899b7ffffbbSSteven Rostedt (Red Hat)
6900b7ffffbbSSteven Rostedt (Red Hat) out_loop:
6901b7ffffbbSSteven Rostedt (Red Hat) if (ftrace_start_up)
6902b7ffffbbSSteven Rostedt (Red Hat) ftrace_arch_code_modify_post_process();
6903b7ffffbbSSteven Rostedt (Red Hat)
6904b7ffffbbSSteven Rostedt (Red Hat) out_unlock:
6905b7ffffbbSSteven Rostedt (Red Hat) mutex_unlock(&ftrace_lock);
6906d7fbf8dfSSteven Rostedt (VMware)
6907d7fbf8dfSSteven Rostedt (VMware) process_cached_mods(mod->name);
6908b7ffffbbSSteven Rostedt (Red Hat) }
6909b7ffffbbSSteven Rostedt (Red Hat)
ftrace_module_init(struct module * mod)6910a949ae56SSteven Rostedt (Red Hat) void ftrace_module_init(struct module *mod)
691193eb677dSSteven Rostedt {
69122889c658SYuntao Wang int ret;
69132889c658SYuntao Wang
691497e9b4fcSSteven Rostedt (Red Hat) if (ftrace_disabled || !mod->num_ftrace_callsites)
6915b6b71f66SAbel Vesa return;
6916b6b71f66SAbel Vesa
69172889c658SYuntao Wang ret = ftrace_process_locs(mod, mod->ftrace_callsites,
691897e9b4fcSSteven Rostedt (Red Hat) mod->ftrace_callsites + mod->num_ftrace_callsites);
69192889c658SYuntao Wang if (ret)
69202889c658SYuntao Wang pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
69212889c658SYuntao Wang mod->name);
692293eb677dSSteven Rostedt }
6923aba4b5c2SSteven Rostedt (VMware)
/*
 * Save the function containing @rec->ip into @mod_map so that kallsyms
 * lookups can still resolve the symbol after the module's init text has
 * been freed.  Failures are silent: the mapping is best effort only.
 */
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
				struct dyn_ftrace *rec)
{
	struct ftrace_mod_func *mod_func;
	unsigned long symsize;
	unsigned long offset;
	char str[KSYM_SYMBOL_LEN];
	char *modname;
	const char *ret;

	/* Resolve name, size and offset of the function holding this ip */
	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
	if (!ret)
		return;

	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
	if (!mod_func)
		return;

	mod_func->name = kstrdup(str, GFP_KERNEL);
	if (!mod_func->name) {
		kfree(mod_func);
		return;
	}

	/* Record the function start, not the call site itself */
	mod_func->ip = rec->ip - offset;
	mod_func->size = symsize;

	/*
	 * NOTE(review): num_funcs is incremented before the entry becomes
	 * visible on the RCU list, so a concurrent reader may briefly see
	 * a count larger than the list length — confirm readers tolerate
	 * this window.
	 */
	mod_map->num_funcs++;

	list_add_rcu(&mod_func->list, &mod_map->funcs);
}
6955aba4b5c2SSteven Rostedt (VMware)
6956aba4b5c2SSteven Rostedt (VMware) static struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module * mod,unsigned long start,unsigned long end)6957aba4b5c2SSteven Rostedt (VMware) allocate_ftrace_mod_map(struct module *mod,
6958aba4b5c2SSteven Rostedt (VMware) unsigned long start, unsigned long end)
6959aba4b5c2SSteven Rostedt (VMware) {
6960aba4b5c2SSteven Rostedt (VMware) struct ftrace_mod_map *mod_map;
6961aba4b5c2SSteven Rostedt (VMware)
6962aba4b5c2SSteven Rostedt (VMware) mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6963aba4b5c2SSteven Rostedt (VMware) if (!mod_map)
6964aba4b5c2SSteven Rostedt (VMware) return NULL;
6965aba4b5c2SSteven Rostedt (VMware)
6966aba4b5c2SSteven Rostedt (VMware) mod_map->mod = mod;
6967aba4b5c2SSteven Rostedt (VMware) mod_map->start_addr = start;
6968aba4b5c2SSteven Rostedt (VMware) mod_map->end_addr = end;
69696171a031SSteven Rostedt (VMware) mod_map->num_funcs = 0;
6970aba4b5c2SSteven Rostedt (VMware)
6971aba4b5c2SSteven Rostedt (VMware) INIT_LIST_HEAD_RCU(&mod_map->funcs);
6972aba4b5c2SSteven Rostedt (VMware)
6973aba4b5c2SSteven Rostedt (VMware) list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6974aba4b5c2SSteven Rostedt (VMware)
6975aba4b5c2SSteven Rostedt (VMware) return mod_map;
6976aba4b5c2SSteven Rostedt (VMware) }
6977aba4b5c2SSteven Rostedt (VMware)
6978aba4b5c2SSteven Rostedt (VMware) static const char *
ftrace_func_address_lookup(struct ftrace_mod_map * mod_map,unsigned long addr,unsigned long * size,unsigned long * off,char * sym)6979aba4b5c2SSteven Rostedt (VMware) ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6980aba4b5c2SSteven Rostedt (VMware) unsigned long addr, unsigned long *size,
6981aba4b5c2SSteven Rostedt (VMware) unsigned long *off, char *sym)
6982aba4b5c2SSteven Rostedt (VMware) {
6983aba4b5c2SSteven Rostedt (VMware) struct ftrace_mod_func *found_func = NULL;
6984aba4b5c2SSteven Rostedt (VMware) struct ftrace_mod_func *mod_func;
6985aba4b5c2SSteven Rostedt (VMware)
6986aba4b5c2SSteven Rostedt (VMware) list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6987aba4b5c2SSteven Rostedt (VMware) if (addr >= mod_func->ip &&
6988aba4b5c2SSteven Rostedt (VMware) addr < mod_func->ip + mod_func->size) {
6989aba4b5c2SSteven Rostedt (VMware) found_func = mod_func;
6990aba4b5c2SSteven Rostedt (VMware) break;
6991aba4b5c2SSteven Rostedt (VMware) }
6992aba4b5c2SSteven Rostedt (VMware) }
6993aba4b5c2SSteven Rostedt (VMware)
6994aba4b5c2SSteven Rostedt (VMware) if (found_func) {
6995aba4b5c2SSteven Rostedt (VMware) if (size)
6996aba4b5c2SSteven Rostedt (VMware) *size = found_func->size;
6997aba4b5c2SSteven Rostedt (VMware) if (off)
6998aba4b5c2SSteven Rostedt (VMware) *off = addr - found_func->ip;
6999aba4b5c2SSteven Rostedt (VMware) if (sym)
7000d0c2d66fSAzeem Shaikh strscpy(sym, found_func->name, KSYM_NAME_LEN);
7001aba4b5c2SSteven Rostedt (VMware)
7002aba4b5c2SSteven Rostedt (VMware) return found_func->name;
7003aba4b5c2SSteven Rostedt (VMware) }
7004aba4b5c2SSteven Rostedt (VMware)
7005aba4b5c2SSteven Rostedt (VMware) return NULL;
7006aba4b5c2SSteven Rostedt (VMware) }
7007aba4b5c2SSteven Rostedt (VMware)
7008aba4b5c2SSteven Rostedt (VMware) const char *
ftrace_mod_address_lookup(unsigned long addr,unsigned long * size,unsigned long * off,char ** modname,char * sym)7009aba4b5c2SSteven Rostedt (VMware) ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7010aba4b5c2SSteven Rostedt (VMware) unsigned long *off, char **modname, char *sym)
7011aba4b5c2SSteven Rostedt (VMware) {
7012aba4b5c2SSteven Rostedt (VMware) struct ftrace_mod_map *mod_map;
7013aba4b5c2SSteven Rostedt (VMware) const char *ret = NULL;
7014aba4b5c2SSteven Rostedt (VMware)
701574401729SPaul E. McKenney /* mod_map is freed via call_rcu() */
7016aba4b5c2SSteven Rostedt (VMware) preempt_disable();
7017aba4b5c2SSteven Rostedt (VMware) list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7018aba4b5c2SSteven Rostedt (VMware) ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7019aba4b5c2SSteven Rostedt (VMware) if (ret) {
7020aba4b5c2SSteven Rostedt (VMware) if (modname)
7021aba4b5c2SSteven Rostedt (VMware) *modname = mod_map->mod->name;
7022aba4b5c2SSteven Rostedt (VMware) break;
7023aba4b5c2SSteven Rostedt (VMware) }
7024aba4b5c2SSteven Rostedt (VMware) }
7025aba4b5c2SSteven Rostedt (VMware) preempt_enable();
7026aba4b5c2SSteven Rostedt (VMware)
7027aba4b5c2SSteven Rostedt (VMware) return ret;
7028aba4b5c2SSteven Rostedt (VMware) }
7029aba4b5c2SSteven Rostedt (VMware)
/*
 * ftrace_mod_get_kallsym - enumerate saved module-init symbols
 * @symnum: index of the symbol to fetch, counting across all saved maps
 *
 * Treats the functions of every saved mod map as one flat list; once
 * @symnum runs past them, falls through to the ftrace trampoline
 * symbols.  Returns 0 on success.
 */
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported)
{
	struct ftrace_mod_map *mod_map;
	struct ftrace_mod_func *mod_func;
	int ret;

	/* Disabling preemption acts as the RCU read lock for the maps */
	preempt_disable();
	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {

		/* Symbol is not in this map: skip past all of its funcs */
		if (symnum >= mod_map->num_funcs) {
			symnum -= mod_map->num_funcs;
			continue;
		}

		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
			/*
			 * NOTE(review): skipping only while symnum > 1 makes
			 * symnum 0 and symnum 1 both land on the first list
			 * entry — looks like an off-by-one; confirm against
			 * the kallsyms iteration contract before changing.
			 */
			if (symnum > 1) {
				symnum--;
				continue;
			}

			*value = mod_func->ip;
			*type = 'T';	/* report as global text symbol */
			strscpy(name, mod_func->name, KSYM_NAME_LEN);
			strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
			*exported = 1;
			preempt_enable();
			return 0;
		}
		/* num_funcs claimed the symbol is here but the list ran dry */
		WARN_ON(1);
		break;
	}
	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
					    module_name, exported);
	preempt_enable();
	return ret;
}
70686171a031SSteven Rostedt (VMware)
7069aba4b5c2SSteven Rostedt (VMware) #else
/* CONFIG_MODULES is not set: provide no-op stand-ins for the above */
static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
				struct dyn_ftrace *rec) { }
static inline struct ftrace_mod_map *
allocate_ftrace_mod_map(struct module *mod,
			unsigned long start, unsigned long end)
{
	return NULL;
}
/* Without modules, only ftrace trampolines can supply extra symbols */
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name, char *module_name,
			   int *exported)
{
	int ret;

	preempt_disable();
	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
					    module_name, exported);
	preempt_enable();
	return ret;
}
709093eb677dSSteven Rostedt #endif /* CONFIG_MODULES */
709193eb677dSSteven Rostedt
/*
 * One freed init-section call site, queued so its ip can be removed
 * from the filter/notrace hashes after ftrace_lock is dropped.
 */
struct ftrace_init_func {
	struct list_head list;
	unsigned long ip;	/* address of the freed init function */
};
70968715b108SJoel Fernandes
70978715b108SJoel Fernandes /* Clear any init ips from hashes */
70988715b108SJoel Fernandes static void
clear_func_from_hash(struct ftrace_init_func * func,struct ftrace_hash * hash)70998715b108SJoel Fernandes clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
710042c269c8SSteven Rostedt (VMware) {
71018715b108SJoel Fernandes struct ftrace_func_entry *entry;
71028715b108SJoel Fernandes
710308468754SChangbin Du entry = ftrace_lookup_ip(hash, func->ip);
71048715b108SJoel Fernandes /*
71058715b108SJoel Fernandes * Do not allow this rec to match again.
71068715b108SJoel Fernandes * Yeah, it may waste some memory, but will be removed
71078715b108SJoel Fernandes * if/when the hash is modified again.
71088715b108SJoel Fernandes */
71098715b108SJoel Fernandes if (entry)
71108715b108SJoel Fernandes entry->ip = 0;
71118715b108SJoel Fernandes }
71128715b108SJoel Fernandes
71138715b108SJoel Fernandes static void
clear_func_from_hashes(struct ftrace_init_func * func)71148715b108SJoel Fernandes clear_func_from_hashes(struct ftrace_init_func *func)
71158715b108SJoel Fernandes {
71168715b108SJoel Fernandes struct trace_array *tr;
71178715b108SJoel Fernandes
71188715b108SJoel Fernandes mutex_lock(&trace_types_lock);
71198715b108SJoel Fernandes list_for_each_entry(tr, &ftrace_trace_arrays, list) {
71208715b108SJoel Fernandes if (!tr->ops || !tr->ops->func_hash)
71218715b108SJoel Fernandes continue;
71228715b108SJoel Fernandes mutex_lock(&tr->ops->func_hash->regex_lock);
71238715b108SJoel Fernandes clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
71248715b108SJoel Fernandes clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
71258715b108SJoel Fernandes mutex_unlock(&tr->ops->func_hash->regex_lock);
71268715b108SJoel Fernandes }
71278715b108SJoel Fernandes mutex_unlock(&trace_types_lock);
71288715b108SJoel Fernandes }
71298715b108SJoel Fernandes
add_to_clear_hash_list(struct list_head * clear_list,struct dyn_ftrace * rec)71308715b108SJoel Fernandes static void add_to_clear_hash_list(struct list_head *clear_list,
71318715b108SJoel Fernandes struct dyn_ftrace *rec)
71328715b108SJoel Fernandes {
71338715b108SJoel Fernandes struct ftrace_init_func *func;
71348715b108SJoel Fernandes
71358715b108SJoel Fernandes func = kmalloc(sizeof(*func), GFP_KERNEL);
71368715b108SJoel Fernandes if (!func) {
713724589e3aSSteven Rostedt (VMware) MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
71388715b108SJoel Fernandes return;
71398715b108SJoel Fernandes }
71408715b108SJoel Fernandes
71418715b108SJoel Fernandes func->ip = rec->ip;
71428715b108SJoel Fernandes list_add(&func->list, clear_list);
71438715b108SJoel Fernandes }
71448715b108SJoel Fernandes
/*
 * ftrace_free_mem - drop all ftrace records inside [start_ptr, end_ptr)
 * @mod:       module owning the range, or NULL for core kernel init text
 * @start_ptr: start of the address range being freed
 * @end_ptr:   end of the address range being freed
 *
 * Called when (init) text is freed so ftrace stops referencing call
 * sites inside it.  Matching records are deleted from their pages;
 * pages that become empty are unlinked and freed only after an RCU
 * grace period (to not race with ftrace_location_range()).  Any
 * filter-hash entries for the removed ips are neutralized afterwards.
 */
void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
{
	unsigned long start = (unsigned long)(start_ptr);
	unsigned long end = (unsigned long)(end_ptr);
	struct ftrace_page **last_pg = &ftrace_pages_start;
	struct ftrace_page *tmp_page = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;
	struct ftrace_mod_map *mod_map = NULL;
	struct ftrace_init_func *func, *func_next;
	LIST_HEAD(clear_hash);

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	mutex_lock(&ftrace_lock);

	/*
	 * If we are freeing module init memory, then check if
	 * any tracer is active. If so, we need to save a mapping of
	 * the module functions being freed with the address.
	 */
	if (mod && ftrace_ops_list != &ftrace_list_end)
		mod_map = allocate_ftrace_mod_map(mod, start, end);

	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
		/* Skip pages entirely outside the freed range */
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
 again:
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (!rec)
			continue;

		/* rec will be cleared from hashes after ftrace_lock unlock */
		add_to_clear_hash_list(&clear_hash, rec);

		if (mod_map)
			save_ftrace_mod_rec(mod_map, rec);

		pg->index--;
		ftrace_update_tot_cnt--;
		if (!pg->index) {
			/* Page now empty: unlink it and queue it for freeing */
			*last_pg = pg->next;
			pg->next = tmp_page;
			tmp_page = pg;
			pg = container_of(last_pg, struct ftrace_page, next);
			if (!(*last_pg))
				ftrace_pages = pg;
			continue;
		}
		/* Close the gap left by the deleted record */
		memmove(rec, rec + 1,
			(pg->index - (rec - pg->records)) * sizeof(*rec));
		/* More than one function may be in this block */
		goto again;
	}
	mutex_unlock(&ftrace_lock);

	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
		clear_func_from_hashes(func);
		kfree(func);
	}
	/* Need to synchronize with ftrace_location_range() */
	if (tmp_page) {
		synchronize_rcu();
		ftrace_free_pages(tmp_page);
	}
}
721642c269c8SSteven Rostedt (VMware)
ftrace_free_init_mem(void)72176cafbe15SSteven Rostedt (VMware) void __init ftrace_free_init_mem(void)
72186cafbe15SSteven Rostedt (VMware) {
72196cafbe15SSteven Rostedt (VMware) void *start = (void *)(&__init_begin);
72206cafbe15SSteven Rostedt (VMware) void *end = (void *)(&__init_end);
72216cafbe15SSteven Rostedt (VMware)
7222380af29bSSteven Rostedt (Google) ftrace_boot_snapshot();
7223380af29bSSteven Rostedt (Google)
7224aba4b5c2SSteven Rostedt (VMware) ftrace_free_mem(NULL, start, end);
722593eb677dSSteven Rostedt }
722693eb677dSSteven Rostedt
/* Weak default for architectures with no dynamic-ftrace init work */
int __init __weak ftrace_dyn_arch_init(void)
{
	return 0;
}
72316644c654SWeizhao Ouyang
/*
 * ftrace_init - boot-time initialization of dynamic ftrace
 *
 * Converts every compiler-generated call site recorded in the
 * __mcount_loc section into an ftrace record.  On any failure, ftrace
 * is disabled for the life of the system.
 */
void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	/* The arch hook may patch code; keep interrupts off around it */
	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);
	if (ret) {
		pr_warn("ftrace: failed to allocate entries for functions\n");
		goto failed;
	}

	pr_info("ftrace: allocated %ld pages with %ld groups\n",
		ftrace_number_of_pages, ftrace_number_of_groups);

	last_ftrace_enabled = ftrace_enabled = 1;

	/* Apply any filters given on the kernel command line */
	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
727368bf21aaSSteven Rostedt
7274f3bea491SSteven Rostedt (Red Hat) /* Do nothing if arch does not support this */
arch_ftrace_update_trampoline(struct ftrace_ops * ops)7275f3bea491SSteven Rostedt (Red Hat) void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7276f3bea491SSteven Rostedt (Red Hat) {
7277f3bea491SSteven Rostedt (Red Hat) }
7278f3bea491SSteven Rostedt (Red Hat)
/*
 * Ask the arch to (re)create the private trampoline for @ops.  If a
 * new trampoline was allocated, publish it to kallsyms and perf so
 * samples landing in it can be symbolized.
 */
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
	unsigned long trampoline = ops->trampoline;

	arch_ftrace_update_trampoline(ops);
	/* Only report trampolines this code allocated and that changed */
	if (ops->trampoline && ops->trampoline != trampoline &&
	   (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
		/* Add to kallsyms before the perf events */
		ftrace_add_trampoline_to_kallsyms(ops);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size, false,
				   FTRACE_TRAMPOLINE_SYM);
		/*
		 * Record the perf text poke event after the ksymbol register
		 * event.
		 */
		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
				     (void *)ops->trampoline,
				     ops->trampoline_size);
	}
}
7300f3bea491SSteven Rostedt (Red Hat)
/* Initialize the ftrace-related lists of a trace array instance */
void ftrace_init_trace_array(struct trace_array *tr)
{
	INIT_LIST_HEAD(&tr->func_probes);
	INIT_LIST_HEAD(&tr->mod_trace);
	INIT_LIST_HEAD(&tr->mod_notrace);
}
73073d083395SSteven Rostedt #else
73080b6e4d56SFrederic Weisbecker
/* !CONFIG_DYNAMIC_FTRACE: minimal global ops, everything goes to stub */
struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};
7314bd69c30bSSteven Rostedt
/* Without dynamic patching, function tracing is simply enabled at boot */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);
73210b6e4d56SFrederic Weisbecker
/* Stubs used when CONFIG_DYNAMIC_FTRACE is not set */
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_all(int command) { }

/* No per-ops trampolines exist without dynamic ftrace */
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}
7328f3bea491SSteven Rostedt (Red Hat)
73293d083395SSteven Rostedt #endif /* CONFIG_DYNAMIC_FTRACE */
73303d083395SSteven Rostedt
/* Attach the top-level trace array to the global ftrace ops at boot */
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
	ftrace_init_trace_array(tr);
}
73374104d326SSteven Rostedt (Red Hat)
/*
 * Install @func as the callback of @tr's ops.  For the global array,
 * warn if something other than the stub was already installed.
 */
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}
73494104d326SSteven Rostedt (Red Hat)
/* Detach @tr's ops from its tracer by restoring the stub callback */
void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}
73544104d326SSteven Rostedt (Red Hat)
7355fabe38abSMasami Hiramatsu static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip,unsigned long parent_ip,struct ftrace_ops * ignored,struct ftrace_regs * fregs)73562f5f6ad9SSteven Rostedt __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7357d19ad077SSteven Rostedt (VMware) struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7358b848914cSSteven Rostedt {
7359d19ad077SSteven Rostedt (VMware) struct pt_regs *regs = ftrace_get_regs(fregs);
7360cdbe61bfSSteven Rostedt struct ftrace_ops *op;
7361edc15cafSSteven Rostedt int bit;
7362b848914cSSteven Rostedt
7363ce5e4803S王贇 /*
7364ce5e4803S王贇 * The ftrace_test_and_set_recursion() will disable preemption,
7365ce5e4803S王贇 * which is required since some of the ops may be dynamically
7366ce5e4803S王贇 * allocated, they must be freed after a synchronize_rcu().
7367ce5e4803S王贇 */
7368ed65df63SSteven Rostedt (VMware) bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7369edc15cafSSteven Rostedt if (bit < 0)
7370b1cff0adSSteven Rostedt return;
7371b1cff0adSSteven Rostedt
73720a016409SSteven Rostedt do_for_each_ftrace_op(op, ftrace_ops_list) {
73732fa717a0SSteven Rostedt (VMware) /* Stub functions don't need to be called nor tested */
73742fa717a0SSteven Rostedt (VMware) if (op->flags & FTRACE_OPS_FL_STUB)
73752fa717a0SSteven Rostedt (VMware) continue;
7376ba27f2bcSSteven Rostedt (Red Hat) /*
7377ba27f2bcSSteven Rostedt (Red Hat) * Check the following for each ops before calling their func:
7378ba27f2bcSSteven Rostedt (Red Hat) * if RCU flag is set, then rcu_is_watching() must be true
7379ba27f2bcSSteven Rostedt (Red Hat) * Otherwise test if the ip matches the ops filter
7380ba27f2bcSSteven Rostedt (Red Hat) *
7381ba27f2bcSSteven Rostedt (Red Hat) * If any of the above fails then the op->func() is not executed.
7382ba27f2bcSSteven Rostedt (Red Hat) */
7383ba27f2bcSSteven Rostedt (Red Hat) if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7384ba27f2bcSSteven Rostedt (Red Hat) ftrace_ops_test(op, ip, regs)) {
73851d48d596SSteven Rostedt (Red Hat) if (FTRACE_WARN_ON(!op->func)) {
73861d48d596SSteven Rostedt (Red Hat) pr_warn("op=%p %pS\n", op, op);
73874104d326SSteven Rostedt (Red Hat) goto out;
73884104d326SSteven Rostedt (Red Hat) }
7389d19ad077SSteven Rostedt (VMware) op->func(ip, parent_ip, op, fregs);
73904104d326SSteven Rostedt (Red Hat) }
73910a016409SSteven Rostedt } while_for_each_ftrace_op(op);
73924104d326SSteven Rostedt (Red Hat) out:
7393edc15cafSSteven Rostedt trace_clear_recursion(bit);
7394b848914cSSteven Rostedt }
7395b848914cSSteven Rostedt
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 *
 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
 * arch_ftrace_ops_list_func.
 */
#if ARCH_SUPPORTS_FTRACE_OPS
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* op is ignored: the list func iterates all registered ops itself */
	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
}
#else
/* Arch cannot pass ops/regs from the trampoline; forward only the two ips */
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
74252f5f6ad9SSteven Rostedt
7426f1ff6348SSteven Rostedt (Red Hat) /*
7427f1ff6348SSteven Rostedt (Red Hat) * If there's only one function registered but it does not support
742878a01febSZheng Yejian * recursion, needs RCU protection, then this function will be called
742978a01febSZheng Yejian * by the mcount trampoline.
7430f1ff6348SSteven Rostedt (Red Hat) */
ftrace_ops_assist_func(unsigned long ip,unsigned long parent_ip,struct ftrace_ops * op,struct ftrace_regs * fregs)7431c68c0fa2SSteven Rostedt (Red Hat) static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7432d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs)
7433f1ff6348SSteven Rostedt (Red Hat) {
7434f1ff6348SSteven Rostedt (Red Hat) int bit;
7435f1ff6348SSteven Rostedt (Red Hat)
7436ed65df63SSteven Rostedt (VMware) bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7437f1ff6348SSteven Rostedt (Red Hat) if (bit < 0)
7438f1ff6348SSteven Rostedt (Red Hat) return;
7439f1ff6348SSteven Rostedt (Red Hat)
7440b40341faSSteven Rostedt (VMware) if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7441d19ad077SSteven Rostedt (VMware) op->func(ip, parent_ip, op, fregs);
7442c68c0fa2SSteven Rostedt (Red Hat)
7443f1ff6348SSteven Rostedt (Red Hat) trace_clear_recursion(bit);
7444f1ff6348SSteven Rostedt (Red Hat) }
7445fabe38abSMasami Hiramatsu NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7446f1ff6348SSteven Rostedt (Red Hat)
744787354059SSteven Rostedt (Red Hat) /**
744887354059SSteven Rostedt (Red Hat) * ftrace_ops_get_func - get the function a trampoline should call
744987354059SSteven Rostedt (Red Hat) * @ops: the ops to get the function for
745087354059SSteven Rostedt (Red Hat) *
745187354059SSteven Rostedt (Red Hat) * Normally the mcount trampoline will call the ops->func, but there
745287354059SSteven Rostedt (Red Hat) * are times that it should not. For example, if the ops does not
745387354059SSteven Rostedt (Red Hat) * have its own recursion protection, then it should call the
74543a150df9SChunyu Hu * ftrace_ops_assist_func() instead.
745587354059SSteven Rostedt (Red Hat) *
745687354059SSteven Rostedt (Red Hat) * Returns the function that the trampoline should call for @ops.
745787354059SSteven Rostedt (Red Hat) */
ftrace_ops_get_func(struct ftrace_ops * ops)745887354059SSteven Rostedt (Red Hat) ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
745987354059SSteven Rostedt (Red Hat) {
746087354059SSteven Rostedt (Red Hat) /*
7461a25d036dSSteven Rostedt (VMware) * If the function does not handle recursion or needs to be RCU safe,
7462a25d036dSSteven Rostedt (VMware) * then we need to call the assist handler.
746387354059SSteven Rostedt (Red Hat) */
7464a25d036dSSteven Rostedt (VMware) if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7465a25d036dSSteven Rostedt (VMware) FTRACE_OPS_FL_RCU))
7466c68c0fa2SSteven Rostedt (Red Hat) return ftrace_ops_assist_func;
746787354059SSteven Rostedt (Red Hat)
746887354059SSteven Rostedt (Red Hat) return ops->func;
746987354059SSteven Rostedt (Red Hat) }
747087354059SSteven Rostedt (Red Hat)
7471345ddcc8SSteven Rostedt (Red Hat) static void
ftrace_filter_pid_sched_switch_probe(void * data,bool preempt,struct task_struct * prev,struct task_struct * next,unsigned int prev_state)7472345ddcc8SSteven Rostedt (Red Hat) ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7473fa2c3254SValentin Schneider struct task_struct *prev,
74749c2136beSDelyan Kratunov struct task_struct *next,
74759c2136beSDelyan Kratunov unsigned int prev_state)
7476e32d8956SSteven Rostedt {
7477345ddcc8SSteven Rostedt (Red Hat) struct trace_array *tr = data;
7478345ddcc8SSteven Rostedt (Red Hat) struct trace_pid_list *pid_list;
7479b3b1e6edSSteven Rostedt (VMware) struct trace_pid_list *no_pid_list;
7480345ddcc8SSteven Rostedt (Red Hat)
7481345ddcc8SSteven Rostedt (Red Hat) pid_list = rcu_dereference_sched(tr->function_pids);
7482b3b1e6edSSteven Rostedt (VMware) no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7483345ddcc8SSteven Rostedt (Red Hat)
7484b3b1e6edSSteven Rostedt (VMware) if (trace_ignore_this_task(pid_list, no_pid_list, next))
74851c5eb448SSteven Rostedt (VMware) this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7486717e3f5eSSteven Rostedt (VMware) FTRACE_PID_IGNORE);
7487717e3f5eSSteven Rostedt (VMware) else
7488717e3f5eSSteven Rostedt (VMware) this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7489717e3f5eSSteven Rostedt (VMware) next->pid);
7490345ddcc8SSteven Rostedt (Red Hat) }
7491345ddcc8SSteven Rostedt (Red Hat)
74921e10486fSNamhyung Kim static void
ftrace_pid_follow_sched_process_fork(void * data,struct task_struct * self,struct task_struct * task)74931e10486fSNamhyung Kim ftrace_pid_follow_sched_process_fork(void *data,
74941e10486fSNamhyung Kim struct task_struct *self,
74951e10486fSNamhyung Kim struct task_struct *task)
74961e10486fSNamhyung Kim {
74971e10486fSNamhyung Kim struct trace_pid_list *pid_list;
74981e10486fSNamhyung Kim struct trace_array *tr = data;
74991e10486fSNamhyung Kim
75001e10486fSNamhyung Kim pid_list = rcu_dereference_sched(tr->function_pids);
75011e10486fSNamhyung Kim trace_filter_add_remove_task(pid_list, self, task);
7502b3b1e6edSSteven Rostedt (VMware)
7503b3b1e6edSSteven Rostedt (VMware) pid_list = rcu_dereference_sched(tr->function_no_pids);
7504b3b1e6edSSteven Rostedt (VMware) trace_filter_add_remove_task(pid_list, self, task);
75051e10486fSNamhyung Kim }
75061e10486fSNamhyung Kim
75071e10486fSNamhyung Kim static void
ftrace_pid_follow_sched_process_exit(void * data,struct task_struct * task)75081e10486fSNamhyung Kim ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
75091e10486fSNamhyung Kim {
75101e10486fSNamhyung Kim struct trace_pid_list *pid_list;
75111e10486fSNamhyung Kim struct trace_array *tr = data;
75121e10486fSNamhyung Kim
75131e10486fSNamhyung Kim pid_list = rcu_dereference_sched(tr->function_pids);
75141e10486fSNamhyung Kim trace_filter_add_remove_task(pid_list, NULL, task);
7515b3b1e6edSSteven Rostedt (VMware)
7516b3b1e6edSSteven Rostedt (VMware) pid_list = rcu_dereference_sched(tr->function_no_pids);
7517b3b1e6edSSteven Rostedt (VMware) trace_filter_add_remove_task(pid_list, NULL, task);
75181e10486fSNamhyung Kim }
75191e10486fSNamhyung Kim
/*
 * Attach or detach the fork/free tracepoint probes that keep the
 * pid filter lists in sync across task creation and destruction.
 */
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
{
	if (!enable) {
		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
						    tr);
		return;
	}

	register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
					  tr);
	register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
					  tr);
}
75341e10486fSNamhyung Kim
/*
 * Clear the pid filter list(s) selected by @type (TRACE_PIDS and/or
 * TRACE_NO_PIDS) for trace instance @tr. Caller must hold ftrace_lock
 * (enforced via the lockdep_is_held() checks below).
 */
static void clear_ftrace_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	int cpu;

	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	/* See if the pids still need to be checked after this */
	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		/* Nothing left to filter: drop the sched_switch probe too */
		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
	}

	/* Unpublish the lists before waiting for readers to finish */
	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->function_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->function_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_rcu();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}
7572e32d8956SSteven Rostedt
/*
 * Drop both the filtered-pids and no-pids lists for @tr,
 * taking ftrace_lock as clear_ftrace_pids() requires.
 */
void ftrace_clear_pids(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);

	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);

	mutex_unlock(&ftrace_lock);
}
7581d879d0b8SNamhyung Kim
/*
 * Clear the pid list(s) given by @type and update the ftrace function
 * handlers to reflect the now-empty filter (used on O_TRUNC opens).
 */
static void ftrace_pid_reset(struct trace_array *tr, int type)
{
	mutex_lock(&ftrace_lock);
	clear_ftrace_pids(tr, type);

	/* Re-select handlers now that the pid lists changed */
	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}
7592756d17eeSjolsa@redhat.com
7593345ddcc8SSteven Rostedt (Red Hat) /* Greater than any max PID */
7594345ddcc8SSteven Rostedt (Red Hat) #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
7595345ddcc8SSteven Rostedt (Red Hat)
/*
 * seq_file start for set_ftrace_pid. Takes ftrace_lock and the
 * sched RCU read lock (released in fpid_stop()).
 */
static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);
	if (pid_list)
		return trace_pid_start(pid_list, pos);

	/* No list installed: emit the "no pid" marker exactly once */
	return *pos ? NULL : FTRACE_NO_PIDS;
}
7612756d17eeSjolsa@redhat.com
/* seq_file next for set_ftrace_pid. */
static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	/* The "no pid" marker is a single-entry sequence */
	if (v == FTRACE_NO_PIDS) {
		(*pos)++;
		return NULL;
	}

	return trace_pid_next(rcu_dereference_sched(tr->function_pids), v, pos);
}
7624756d17eeSjolsa@redhat.com
/* Release the locks taken in fpid_start()/fnpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}
7631756d17eeSjolsa@redhat.com
fpid_show(struct seq_file * m,void * v)7632756d17eeSjolsa@redhat.com static int fpid_show(struct seq_file *m, void *v)
7633756d17eeSjolsa@redhat.com {
7634345ddcc8SSteven Rostedt (Red Hat) if (v == FTRACE_NO_PIDS) {
7635fa6f0cc7SRasmus Villemoes seq_puts(m, "no pid\n");
7636756d17eeSjolsa@redhat.com return 0;
7637756d17eeSjolsa@redhat.com }
7638756d17eeSjolsa@redhat.com
7639345ddcc8SSteven Rostedt (Red Hat) return trace_pid_show(m, v);
7640756d17eeSjolsa@redhat.com }
7641756d17eeSjolsa@redhat.com
/* seq_file iterator for the set_ftrace_pid file. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
7648756d17eeSjolsa@redhat.com
/*
 * seq_file start for set_ftrace_notrace_pid. Same locking as
 * fpid_start() but walks the "no pid" list.
 */
static void *fnpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_no_pids);
	if (pid_list)
		return trace_pid_start(pid_list, pos);

	/* No list installed: emit the "no pid" marker exactly once */
	return *pos ? NULL : FTRACE_NO_PIDS;
}
7665b3b1e6edSSteven Rostedt (VMware)
/* seq_file next for set_ftrace_notrace_pid. */
static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	/* The "no pid" marker is a single-entry sequence */
	if (v == FTRACE_NO_PIDS) {
		(*pos)++;
		return NULL;
	}

	return trace_pid_next(rcu_dereference_sched(tr->function_no_pids), v, pos);
}
7677b3b1e6edSSteven Rostedt (VMware)
/* seq_file iterator for the set_ftrace_notrace_pid file. */
static const struct seq_operations ftrace_no_pid_sops = {
	.start = fnpid_start,
	.next = fnpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
7684b3b1e6edSSteven Rostedt (VMware)
/*
 * Common open for the two pid filter files. @type selects which
 * seq_operations to use (TRACE_PIDS or TRACE_NO_PIDS). Takes a
 * reference on @tr that is dropped in ftrace_pid_release() (or here
 * on any error path).
 */
static int pid_open(struct inode *inode, struct file *file, int type)
{
	const struct seq_operations *seq_ops;
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* Opening with O_TRUNC clears the current pid list first */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr, type);

	switch (type) {
	case TRACE_PIDS:
		seq_ops = &ftrace_pid_sops;
		break;
	case TRACE_NO_PIDS:
		seq_ops = &ftrace_no_pid_sops;
		break;
	default:
		/* Drop the reference taken above before bailing out */
		trace_array_put(tr);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ret = seq_open(file, seq_ops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}
7724756d17eeSjolsa@redhat.com
/* Open handler for set_ftrace_pid. */
static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_PIDS);
}
7730b3b1e6edSSteven Rostedt (VMware)
/* Open handler for set_ftrace_notrace_pid. */
static int
ftrace_no_pid_open(struct inode *inode, struct file *file)
{
	return pid_open(inode, file, TRACE_NO_PIDS);
}
7736b3b1e6edSSteven Rostedt (VMware)
ignore_task_cpu(void * data)7737345ddcc8SSteven Rostedt (Red Hat) static void ignore_task_cpu(void *data)
7738345ddcc8SSteven Rostedt (Red Hat) {
7739345ddcc8SSteven Rostedt (Red Hat) struct trace_array *tr = data;
7740345ddcc8SSteven Rostedt (Red Hat) struct trace_pid_list *pid_list;
7741b3b1e6edSSteven Rostedt (VMware) struct trace_pid_list *no_pid_list;
7742345ddcc8SSteven Rostedt (Red Hat)
7743345ddcc8SSteven Rostedt (Red Hat) /*
7744345ddcc8SSteven Rostedt (Red Hat) * This function is called by on_each_cpu() while the
7745345ddcc8SSteven Rostedt (Red Hat) * event_mutex is held.
7746345ddcc8SSteven Rostedt (Red Hat) */
7747345ddcc8SSteven Rostedt (Red Hat) pid_list = rcu_dereference_protected(tr->function_pids,
7748345ddcc8SSteven Rostedt (Red Hat) mutex_is_locked(&ftrace_lock));
7749b3b1e6edSSteven Rostedt (VMware) no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7750b3b1e6edSSteven Rostedt (VMware) mutex_is_locked(&ftrace_lock));
7751345ddcc8SSteven Rostedt (Red Hat)
7752b3b1e6edSSteven Rostedt (VMware) if (trace_ignore_this_task(pid_list, no_pid_list, current))
77531c5eb448SSteven Rostedt (VMware) this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7754717e3f5eSSteven Rostedt (VMware) FTRACE_PID_IGNORE);
7755717e3f5eSSteven Rostedt (VMware) else
7756717e3f5eSSteven Rostedt (VMware) this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7757717e3f5eSSteven Rostedt (VMware) current->pid);
7758345ddcc8SSteven Rostedt (Red Hat) }
7759345ddcc8SSteven Rostedt (Red Hat)
/*
 * Common write handler for the two pid filter files. Parses the
 * user buffer into a new pid list, publishes it via RCU in place of
 * the old one for the list selected by @type, and (un)hooks the
 * sched_switch probe as needed.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t
pid_write(struct file *filp, const char __user *ubuf,
	  size_t cnt, loff_t *ppos, int type)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids;
	struct trace_pid_list *other_pids;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	/*
	 * filtered_pids is the list being replaced; other_pids is the
	 * complementary list, consulted only to decide probe registration.
	 */
	switch (type) {
	case TRACE_PIDS:
		filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
		other_pids = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));
		break;
	case TRACE_NO_PIDS:
		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
					     lockdep_is_held(&ftrace_lock));
		other_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
		break;
	default:
		ret = -EINVAL;
		WARN_ON_ONCE(1);
		goto out;
	}

	/* Build the new list from the user buffer (appends to filtered_pids) */
	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	switch (type) {
	case TRACE_PIDS:
		rcu_assign_pointer(tr->function_pids, pid_list);
		break;
	case TRACE_NO_PIDS:
		rcu_assign_pointer(tr->function_no_pids, pid_list);
		break;
	}


	if (filtered_pids) {
		/* Old list replaced: wait out readers before freeing it */
		synchronize_rcu();
		trace_pid_list_free(filtered_pids);
	} else if (pid_list && !other_pids) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
7834978f3a45SSteven Rostedt
/* Write handler for set_ftrace_pid. */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}
7841b3b1e6edSSteven Rostedt (VMware)
/* Write handler for set_ftrace_notrace_pid. */
static ssize_t
ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}
7848b3b1e6edSSteven Rostedt (VMware)
/*
 * Release handler shared by both pid filter files. Drops the
 * trace_array reference taken by tracing_check_open_get_tr()
 * in pid_open().
 */
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}
7858df4fc315SSteven Rostedt
/* File operations for set_ftrace_pid. */
static const struct file_operations ftrace_pid_fops = {
	.open = ftrace_pid_open,
	.write = ftrace_pid_write,
	.read = seq_read,
	.llseek = tracing_lseek,
	.release = ftrace_pid_release,
};
7866df4fc315SSteven Rostedt
/* File operations for set_ftrace_notrace_pid. */
static const struct file_operations ftrace_no_pid_fops = {
	.open = ftrace_no_pid_open,
	.write = ftrace_no_pid_write,
	.read = seq_read,
	.llseek = tracing_lseek,
	.release = ftrace_pid_release,
};
7874b3b1e6edSSteven Rostedt (VMware)
/* Create the per-instance pid filter control files under @d_tracer. */
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
			    tr, &ftrace_pid_fops);
	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
			  d_tracer, tr, &ftrace_no_pid_fops);
}
7882df4fc315SSteven Rostedt
/*
 * Create the tracefs files that exist only in the top-level (global)
 * trace instance: the dynamic ftrace files and the profile files.
 */
void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}
7892501c2375SSteven Rostedt (Red Hat)
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 *
 * NOTE(review): the sentence above refers readers back to this very
 * function; it presumably meant a different (non-atomic) teardown
 * API -- confirm against the rest of the file.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	/* Point the tracer callback at the no-op stub */
	ftrace_trace_function = ftrace_stub;
	kprobe_ftrace_kill();
}
7907a2bb6a3dSSteven Rostedt
/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace is "dead" (permanently disabled), zero otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
7917e0a413f6SSteven Rostedt
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
 * it doesn't conflict with any direct ftrace_ops. If there is existing
 * direct ftrace_ops on a kernel function being patched, call
 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
 *
 * @ops: ftrace_ops being registered.
 *
 * Returns:
 * 0 on success;
 * Negative on failure.
 */
static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *hash;
	struct ftrace_ops *op;
	int size, i, ret;

	lockdep_assert_held_once(&direct_mutex);

	/* Only IPMODIFY ops can conflict with direct ops */
	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	hash = ops->func_hash->filter_hash;
	size = 1 << hash->size_bits;
	/* Walk every ip filtered by @ops and look for a direct op on it */
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			unsigned long ip = entry->ip;
			bool found_op = false;

			mutex_lock(&ftrace_lock);
			do_for_each_ftrace_op(op, ftrace_ops_list) {
				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
					continue;
				if (ops_references_ip(op, ip)) {
					found_op = true;
					break;
				}
			} while_for_each_ftrace_op(op);
			mutex_unlock(&ftrace_lock);

			/*
			 * NOTE(review): op is dereferenced below after
			 * ftrace_lock was dropped -- presumably direct_mutex
			 * (held per the assertion above) keeps direct ops
			 * alive here; confirm.
			 */
			if (found_op) {
				if (!op->ops_func)
					return -EBUSY;

				/* Ask the direct op to share IPMODIFY with us */
				ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}
797453cd885bSSong Liu
/*
 * Similar to prepare_direct_functions_for_ipmodify, clean up after ops
 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
 * ops.
 */
static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *hash;
	struct ftrace_ops *op;
	int size, i;

	/* Nothing to undo unless this ops used IPMODIFY */
	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return;

	/* Unlike the prepare path, direct_mutex is taken here, not by the caller */
	mutex_lock(&direct_mutex);

	/* Walk every function address this ops filtered on */
	hash = ops->func_hash->filter_hash;
	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			unsigned long ip = entry->ip;
			bool found_op = false;

			/* ftrace_lock nests inside direct_mutex, as in prepare */
			mutex_lock(&ftrace_lock);
			do_for_each_ftrace_op(op, ftrace_ops_list) {
				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
					continue;
				if (ops_references_ip(op, ip)) {
					found_op = true;
					break;
				}
			} while_for_each_ftrace_op(op);
			mutex_unlock(&ftrace_lock);

			/* The cleanup is optional, ignore any errors */
			if (found_op && op->ops_func)
				op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
		}
	}
	mutex_unlock(&direct_mutex);
}
801753cd885bSSong Liu
801853cd885bSSong Liu #define lock_direct_mutex() mutex_lock(&direct_mutex)
801953cd885bSSong Liu #define unlock_direct_mutex() mutex_unlock(&direct_mutex)
802053cd885bSSong Liu
802153cd885bSSong Liu #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
802253cd885bSSong Liu
/* Without DIRECT_CALLS support there are no direct ops to conflict with */
static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
{
	return 0;
}
802753cd885bSSong Liu
/* Without DIRECT_CALLS support there is nothing to clean up */
static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
{
}
803153cd885bSSong Liu
803253cd885bSSong Liu #define lock_direct_mutex() do { } while (0)
803353cd885bSSong Liu #define unlock_direct_mutex() do { } while (0)
803453cd885bSSong Liu
803553cd885bSSong Liu #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
803653cd885bSSong Liu
803753cd885bSSong Liu /*
803853cd885bSSong Liu * Similar to register_ftrace_function, except we don't lock direct_mutex.
803953cd885bSSong Liu */
register_ftrace_function_nolock(struct ftrace_ops * ops)804053cd885bSSong Liu static int register_ftrace_function_nolock(struct ftrace_ops *ops)
804153cd885bSSong Liu {
804253cd885bSSong Liu int ret;
804353cd885bSSong Liu
804453cd885bSSong Liu ftrace_ops_init(ops);
804553cd885bSSong Liu
804653cd885bSSong Liu mutex_lock(&ftrace_lock);
804753cd885bSSong Liu
804853cd885bSSong Liu ret = ftrace_startup(ops, 0);
804953cd885bSSong Liu
805053cd885bSSong Liu mutex_unlock(&ftrace_lock);
805153cd885bSSong Liu
805253cd885bSSong Liu return ret;
805353cd885bSSong Liu }
805453cd885bSSong Liu
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 * with "notrace", otherwise it will go into a
 * recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int err;

	lock_direct_mutex();

	/* Resolve any IPMODIFY/direct-ops conflicts before starting up */
	err = prepare_direct_functions_for_ipmodify(ops);
	if (err >= 0)
		err = register_ftrace_function_nolock(ops);

	unlock_direct_mutex();
	return err;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
80823d083395SSteven Rostedt
80833d083395SSteven Rostedt /**
808432632920SUwe Kleine-Koenig * unregister_ftrace_function - unregister a function for profiling.
808578cbc651SJiapeng Chong * @ops: ops structure that holds the function to unregister
80863d083395SSteven Rostedt *
80873d083395SSteven Rostedt * Unregister a function that was added to be called by ftrace profiling.
80883d083395SSteven Rostedt */
unregister_ftrace_function(struct ftrace_ops * ops)80893d083395SSteven Rostedt int unregister_ftrace_function(struct ftrace_ops *ops)
80903d083395SSteven Rostedt {
80913d083395SSteven Rostedt int ret;
80923d083395SSteven Rostedt
8093e6ea44e9SSteven Rostedt mutex_lock(&ftrace_lock);
80948a56d776SSteven Rostedt (Red Hat) ret = ftrace_shutdown(ops, 0);
8095e6ea44e9SSteven Rostedt mutex_unlock(&ftrace_lock);
8096b0fc494fSSteven Rostedt
809753cd885bSSong Liu cleanup_direct_functions_after_ipmodify(ops);
8098b0fc494fSSteven Rostedt return ret;
8099b0fc494fSSteven Rostedt }
8100cdbe61bfSSteven Rostedt EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8101b0fc494fSSteven Rostedt
/* qsort()/bsearch() comparator for an array of symbol-name pointers */
static int symbols_cmp(const void *a, const void *b)
{
	const char * const *lhs = a;
	const char * const *rhs = b;

	return strcmp(*lhs, *rhs);
}
8109bed0d9a5SJiri Olsa
/* Shared state for resolving an array of symbol names to addresses */
struct kallsyms_data {
	unsigned long *addrs;	/* output: resolved addresses, same index as syms */
	const char **syms;	/* input: alphabetically sorted symbol names */
	size_t cnt;		/* number of entries in syms/addrs */
	size_t found;		/* how many symbols have been resolved so far */
};
8116bed0d9a5SJiri Olsa
81173640bf85SJiri Olsa /* This function gets called for all kernel and module symbols
81183640bf85SJiri Olsa * and returns 1 in case we resolved all the requested symbols,
81193640bf85SJiri Olsa * 0 otherwise.
81203640bf85SJiri Olsa */
kallsyms_callback(void * data,const char * name,unsigned long addr)81213703bd54SZhen Lei static int kallsyms_callback(void *data, const char *name, unsigned long addr)
8122bed0d9a5SJiri Olsa {
8123bed0d9a5SJiri Olsa struct kallsyms_data *args = data;
8124eb1b2985SJiri Olsa const char **sym;
8125eb1b2985SJiri Olsa int idx;
8126bed0d9a5SJiri Olsa
8127eb1b2985SJiri Olsa sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8128eb1b2985SJiri Olsa if (!sym)
8129eb1b2985SJiri Olsa return 0;
8130eb1b2985SJiri Olsa
8131eb1b2985SJiri Olsa idx = sym - args->syms;
8132eb1b2985SJiri Olsa if (args->addrs[idx])
8133bed0d9a5SJiri Olsa return 0;
8134bed0d9a5SJiri Olsa
81359d68c19cSJiri Olsa if (!ftrace_location(addr))
8136bed0d9a5SJiri Olsa return 0;
8137bed0d9a5SJiri Olsa
8138eb1b2985SJiri Olsa args->addrs[idx] = addr;
8139eb1b2985SJiri Olsa args->found++;
8140bed0d9a5SJiri Olsa return args->found == args->cnt ? 1 : 0;
8141bed0d9a5SJiri Olsa }
8142bed0d9a5SJiri Olsa
8143bed0d9a5SJiri Olsa /**
8144bed0d9a5SJiri Olsa * ftrace_lookup_symbols - Lookup addresses for array of symbols
8145bed0d9a5SJiri Olsa *
8146bed0d9a5SJiri Olsa * @sorted_syms: array of symbols pointers symbols to resolve,
8147bed0d9a5SJiri Olsa * must be alphabetically sorted
8148bed0d9a5SJiri Olsa * @cnt: number of symbols/addresses in @syms/@addrs arrays
8149bed0d9a5SJiri Olsa * @addrs: array for storing resulting addresses
8150bed0d9a5SJiri Olsa *
8151bed0d9a5SJiri Olsa * This function looks up addresses for array of symbols provided in
8152bed0d9a5SJiri Olsa * @syms array (must be alphabetically sorted) and stores them in
8153bed0d9a5SJiri Olsa * @addrs array, which needs to be big enough to store at least @cnt
8154bed0d9a5SJiri Olsa * addresses.
8155bed0d9a5SJiri Olsa *
8156bed0d9a5SJiri Olsa * This function returns 0 if all provided symbols are found,
8157bed0d9a5SJiri Olsa * -ESRCH otherwise.
8158bed0d9a5SJiri Olsa */
ftrace_lookup_symbols(const char ** sorted_syms,size_t cnt,unsigned long * addrs)8159bed0d9a5SJiri Olsa int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8160bed0d9a5SJiri Olsa {
8161bed0d9a5SJiri Olsa struct kallsyms_data args;
81623640bf85SJiri Olsa int found_all;
8163bed0d9a5SJiri Olsa
8164eb1b2985SJiri Olsa memset(addrs, 0, sizeof(*addrs) * cnt);
8165bed0d9a5SJiri Olsa args.addrs = addrs;
8166bed0d9a5SJiri Olsa args.syms = sorted_syms;
8167bed0d9a5SJiri Olsa args.cnt = cnt;
8168bed0d9a5SJiri Olsa args.found = 0;
81693640bf85SJiri Olsa
81703640bf85SJiri Olsa found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
81713640bf85SJiri Olsa if (found_all)
81723640bf85SJiri Olsa return 0;
817307cc2c93SZhen Lei found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
81743640bf85SJiri Olsa return found_all ? 0 : -ESRCH;
8175bed0d9a5SJiri Olsa }
817644d35720SLinus Torvalds
81775d79fa0dSYueHaibing #ifdef CONFIG_SYSCTL
81788fd7c214SLuis Chamberlain
81798fd7c214SLuis Chamberlain #ifdef CONFIG_DYNAMIC_FTRACE
ftrace_startup_sysctl(void)8180f8b7d2b4SLuis Chamberlain static void ftrace_startup_sysctl(void)
8181f8b7d2b4SLuis Chamberlain {
8182f8b7d2b4SLuis Chamberlain int command;
8183f8b7d2b4SLuis Chamberlain
8184f8b7d2b4SLuis Chamberlain if (unlikely(ftrace_disabled))
8185f8b7d2b4SLuis Chamberlain return;
8186f8b7d2b4SLuis Chamberlain
8187f8b7d2b4SLuis Chamberlain /* Force update next time */
8188f8b7d2b4SLuis Chamberlain saved_ftrace_func = NULL;
8189f8b7d2b4SLuis Chamberlain /* ftrace_start_up is true if we want ftrace running */
8190f8b7d2b4SLuis Chamberlain if (ftrace_start_up) {
8191f8b7d2b4SLuis Chamberlain command = FTRACE_UPDATE_CALLS;
8192f8b7d2b4SLuis Chamberlain if (ftrace_graph_active)
8193f8b7d2b4SLuis Chamberlain command |= FTRACE_START_FUNC_RET;
8194f8b7d2b4SLuis Chamberlain ftrace_startup_enable(command);
8195f8b7d2b4SLuis Chamberlain }
8196f8b7d2b4SLuis Chamberlain }
8197f8b7d2b4SLuis Chamberlain
ftrace_shutdown_sysctl(void)8198f8b7d2b4SLuis Chamberlain static void ftrace_shutdown_sysctl(void)
8199f8b7d2b4SLuis Chamberlain {
8200f8b7d2b4SLuis Chamberlain int command;
8201f8b7d2b4SLuis Chamberlain
8202f8b7d2b4SLuis Chamberlain if (unlikely(ftrace_disabled))
8203f8b7d2b4SLuis Chamberlain return;
8204f8b7d2b4SLuis Chamberlain
8205f8b7d2b4SLuis Chamberlain /* ftrace_start_up is true if ftrace is running */
8206f8b7d2b4SLuis Chamberlain if (ftrace_start_up) {
8207f8b7d2b4SLuis Chamberlain command = FTRACE_DISABLE_CALLS;
8208f8b7d2b4SLuis Chamberlain if (ftrace_graph_active)
8209f8b7d2b4SLuis Chamberlain command |= FTRACE_STOP_FUNC_RET;
8210f8b7d2b4SLuis Chamberlain ftrace_run_update_code(command);
8211f8b7d2b4SLuis Chamberlain }
8212f8b7d2b4SLuis Chamberlain }
82138fd7c214SLuis Chamberlain #else
82148fd7c214SLuis Chamberlain # define ftrace_startup_sysctl() do { } while (0)
82158fd7c214SLuis Chamberlain # define ftrace_shutdown_sysctl() do { } while (0)
82168fd7c214SLuis Chamberlain #endif /* CONFIG_DYNAMIC_FTRACE */
8217f8b7d2b4SLuis Chamberlain
/*
 * Returns true if any registered ftrace_ops has FTRACE_OPS_FL_PERMANENT
 * set. Such ops must not be disabled via the ftrace_enabled sysctl.
 */
static bool is_permanent_ops_registered(void)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PERMANENT)
			return true;
	} while_for_each_ftrace_op(op);

	return false;
}
82297162431dSMiroslav Benes
/*
 * Handler for the "kernel.ftrace_enabled" sysctl. On writes that change
 * the value, it starts or stops function tracing accordingly.
 */
static int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	/* Once ftrace is marked dead (ftrace_kill()), refuse all updates */
	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on reads, errors, or when the value did not change */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* Ops marked PERMANENT must not be disabled via sysctl */
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			ret = -EBUSY;
			goto out;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

	/* Remember the value so the next write can detect a change */
	last_ftrace_enabled = !!ftrace_enabled;
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
82738e4e83b2SWei Xiao
/* sysctl table registered under "kernel" (kernel.ftrace_enabled) */
static struct ctl_table ftrace_sysctls[] = {
	{
		.procname       = "ftrace_enabled",
		.data           = &ftrace_enabled,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = ftrace_enable_sysctl,
	},
	{}
};
82848e4e83b2SWei Xiao
/* Register the ftrace sysctl table at late boot */
static int __init ftrace_sysctl_init(void)
{
	register_sysctl_init("kernel", ftrace_sysctls);
	return 0;
}
late_initcall(ftrace_sysctl_init);
82918e4e83b2SWei Xiao #endif
8292