/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

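/*
 * Hook this tracer up to a trace_array: remember the array, start
 * recording cmdlines (for pid -> comm resolution) and enable the
 * function trace callbacks.
 */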
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

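/* Tear down in the reverse order of function_trace_init() */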
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

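/* Clear the per-cpu ring buffers when tracing is (re)started */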
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

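/*
 * Function callback used when the TRACE_ITER_PREEMPTONLY trace option
 * is set: it only disables preemption around the event recording
 * instead of disabling hard interrupts.
 */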
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Record only if this is not a nested call on this cpu */
	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Tracer options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_flags func_flags;

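/*
 * Default function callback: records the calling function and its
 * parent into the ring buffer with hard interrupts disabled.
 */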
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

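/*
 * Like function_trace_call(), but also records a stack trace for
 * each function entry (the func_stack_trace option).
 */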
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * Skip over five functions:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call,
		 *    ftrace_list_func,
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

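/*
 * Pick the callback matching the current trace options, register it
 * with ftrace, then mark function tracing as enabled.
 */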
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

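/* Disable the callbacks and unregister whichever ops is active */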
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

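/*
 * Called when a tracer option is flipped at run time: swap between
 * the plain and the stack-tracing callbacks as needed.
 */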
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
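/*
 * traceon/traceoff probe callbacks. The probe's data pointer is used
 * as a countdown: -1 means fire without limit, any other value is
 * decremented on each use until it reaches zero.
 */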
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

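/*
 * Show a registered probe in set_ftrace_filter output, e.g.
 * "schedule:traceoff:count=5" or "schedule:traceon:unlimited".
 */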
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

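/* Remove a previously registered traceon/traceoff probe */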
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

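/*
 * Parse "<function>:traceon[:count]" or "<function>:traceoff[:count]"
 * written to set_ftrace_filter and register the matching probe. A
 * leading '!' unregisters the probe instead.
 */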
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

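/*
 * Register both commands; if the second registration fails, back out
 * the first so we never leave a half-registered pair.
 */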
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);