/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

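/*
 * Tracer ->init callback: remember the trace_array we record into,
 * note the CPU the tracer was enabled on, and turn on both cmdline
 * recording and the function entry callback.
 */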
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

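/* Tracer ->reset callback: tear down what function_trace_init() enabled */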
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

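/* Tracer ->start callback: discard stale per-cpu buffer contents */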
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

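/*
 * Callback variant used when TRACE_ITER_PREEMPTONLY is set: only
 * preemption (not interrupts) is disabled around the record; the
 * per-cpu 'disabled' counter still guards against nested writes.
 */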
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

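/*
 * Default callback: interrupts are disabled for the duration of the
 * record, and the per-cpu 'disabled' counter keeps any nested entry
 * (e.g. from an NMI) from writing a second event underneath us.
 */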
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

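/*
 * Callback used when the func_stack_trace option is set: record the
 * function entry and then a stack trace of how we got there.
 */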
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

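/*
 * Select the callback variant that matches the current trace flags
 * and options, then register it with ftrace. ftrace_function_enabled
 * stays clear during the switch so a callback that fires mid-update
 * bails out early.
 */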
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

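/*
 * Called when a tracer option is toggled through trace_options:
 * swap the registered ftrace_ops when func_stack_trace changes.
 */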
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
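/*
 * Probe callbacks for the traceon/traceoff function triggers, e.g.:
 *
 *	echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * *data holds the countdown: -1 fires forever, any other value is
 * decremented on each hit until it reaches zero.
 */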
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	char str[KSYM_SYMBOL_LEN];
	long count = (long)data;

	kallsyms_lookup(ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	if (ops == &traceon_probe_ops)
		seq_puts(m, "traceon");
	else
		seq_puts(m, "traceoff");

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);
	seq_putc(m, '\n');

	return 0;
}

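/* Called for a '!' prefixed glob: remove a previously installed probe */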
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

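/*
 * Parse "<glob>:traceon[:count]" or "<glob>:traceoff[:count]" written
 * to set_ftrace_filter and register (or, for a '!' prefix, unregister)
 * the matching probe.
 */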
static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

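/* Wire up the traceon/traceoff commands and register the tracer at boot */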
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);