// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
/* Our options */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

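/*
 * Allocate a private ftrace_ops for a trace_array instance. The top
 * level trace_array keeps using the boot-time "global_ops", so nothing
 * is allocated for it here.
 */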
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

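/*
 * Map the currently selected tracer options to the matching trace
 * callback. Returns NULL if the combination of option bits is not
 * supported.
 */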
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

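/*
 * Make sure the per-CPU buffers needed by the "no-repeats" option are
 * available. They are allocated lazily, the first time the option is
 * requested for this trace_array. Returns false if the allocation fails.
 */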
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

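/*
 * Plain per-instance function tracing callback. Recursion is handled
 * with ftrace_test_recursion_trylock(), and the event is dropped while
 * the per-CPU data is marked disabled.
 */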
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

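/*
 * Check whether this call repeats the previously recorded ip/parent_ip
 * pair. If so, only bump the repetition counter and note the timestamp
 * of the last call; the caller then skips emitting a new event.
 */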
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

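/*
 * Flush any pending repetition count via trace_last_func_repeats() and
 * remember the new ip/parent_ip pair for the next comparison.
 */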
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any point here, but as far as we can
	 * tell, the only damage it can cause is to skew the repetition
	 * counter; no trace data is lost.
	 * TODO: think about a solution that is better than just hoping to
	 * be lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

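/*
 * Called when one of the tracer options above is toggled at run time.
 * Illustrative usage (tracefs paths may vary by configuration):
 *
 *   # cd /sys/kernel/tracing
 *   # echo function > current_tracer
 *   # echo 1 > options/func_stack_trace
 *
 * When the function tracer is the current tracer, the callback of
 * tr->ops is swapped for the variant matching the new option set.
 */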
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
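/*
 * The probes below implement the "traceon", "traceoff", "stacktrace",
 * "dump" and "cpudump" commands that can be attached to functions via
 * set_ftrace_filter. Illustrative usage (see Documentation/trace/ftrace.rst
 * for the full syntax):
 *
 *   # echo 'schedule:traceoff' > set_ftrace_filter
 *   # echo 'schedule:stacktrace:5' > set_ftrace_filter
 *   # echo '!schedule:traceoff' > set_ftrace_filter
 *
 * An optional ":count" limits how many times the command triggers;
 * a leading '!' removes a previously installed probe.
 */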
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled),
	 * then a write memory barrier is used to make sure that the new
	 * state is visible before the counter is updated to the old count
	 * minus one. This guarantees that another CPU executing this code
	 * will see the new state before seeing the new counter value, and
	 * will not do anything if the new counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

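/*
 * Counted variant of the stacktrace probe. The per-ip counter lives in
 * the func_mapper and is decremented with cmpxchg() so that concurrent
 * CPUs hitting the same function cannot take more stack traces than the
 * user asked for.
 */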
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

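/*
 * Common helper for the probes' ->print() callbacks. A listed probe is
 * shown as, e.g. (the function name below is only an example):
 *
 *   schedule:traceoff:count=3
 *   schedule:stacktrace:unlimited
 */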
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

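/*
 * ->init()/->free() helpers shared by the counted probes. The optional
 * ":count" argument, already cast to a pointer by the command callback,
 * is stored as the per-ip value in a ftrace_func_mapper hanging off the
 * probe's data pointer.
 */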
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

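/*
 * Common handler for the set_ftrace_filter commands above. A leading
 * '!' in the glob removes an existing probe; an optional numeric
 * parameter after the command name becomes the trigger count, passed
 * to the probe's ->init() through the data pointer.
 */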
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

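/*
 * Register the probe commands. If any registration fails, unwind the
 * ones that already succeeded so no stale command is left behind.
 */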
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

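/* Register the function tracer (and its probe commands) with the tracing core. */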
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}