xref: /openbmc/linux/kernel/trace/trace_functions.c (revision 6f6249a599e52e1a5f0b632f8edff733cfa76450)
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

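/*
 * Allocate a per-instance ftrace_ops for this trace_array. The top level
 * array uses the boot-time "global_ops" instead, so nothing is allocated
 * for it here.
 */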
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

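/*
 * Map the current set of option flags to the matching trace callback.
 * Returns NULL if the combination of flags is not supported.
 */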
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

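/*
 * Make sure the per-CPU buffer used by the "no-repeats" option exists when
 * that option is requested. Returns false if the allocation fails.
 */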
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx_dec();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

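/*
 * Check whether this call repeats the previously traced ip/parent_ip pair.
 * If so, only bump the per-CPU repeat count and refresh the timestamp of
 * the last call instead of recording a new event.
 */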
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

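/*
 * Flush any accumulated repeat count as a "last func repeats" event and
 * remember the new ip/parent_ip pair for the next comparison.
 */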
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	trace_ctx = tracing_gen_ctx_dec();
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

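/*
 * Handle writes to the tracer's option flags. The callback is swapped only
 * when the function tracer is the current tracer and the new flag
 * combination actually changes the callback in use.
 */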
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled),
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * one less than its old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	__trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

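/*
 * Consume one count from the probe's counter, if one is attached. Returns 1
 * while the probe should still fire and 0 once the counter is exhausted.
 */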
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


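/*
 * Lazily allocate the per-probe function mapper the first time a counted
 * probe is attached, then record the initial count for this ip.
 */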
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

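/*
 * Common handler for the traceon/traceoff/stacktrace/dump/cpudump function
 * commands: parse the optional ":count" parameter, and register the probe
 * (or unregister it when the pattern is prefixed with '!').
 */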
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}