// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

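/*
 * Mask of the real option bits: with TRACE_FUNC_OPT_HIGHEST_BIT at 0x4 the
 * mask below evaluates to 0x3, covering every combination of the stack and
 * no-repeats options that select_trace_function() dispatches on.
 */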
#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

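/* Lazily allocate the per-CPU buffer used to detect consecutive repeats. */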
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

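/*
 * ftrace_test_recursion_trylock() returns a negative value when the callback
 * would recurse into itself (e.g. via a traced function that the callback
 * calls); on success it returns a recursion bit that must be released with
 * ftrace_test_recursion_unlock().
 */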
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

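/*
 * If this call repeats the previously recorded ip/parent_ip pair, only bump
 * the per-CPU repeat count (saturating at U16_MAX) and remember the
 * timestamp, instead of writing another event into the ring buffer.
 */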
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
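
/*
 * When the function tracer is active, the options above show up under the
 * tracefs options directory, e.g. (illustrative paths, the mount point may
 * vary):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   echo 1 > /sys/kernel/tracing/options/func-no-repeats
 */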

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data = {
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before updating the counter to one
	 * less than the old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
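		/*
		 * cmpxchg() returns the value *count held before the swap:
		 * if it equals old_count, this CPU won the decrement race
		 * and emits the stack trace; otherwise another CPU got
		 * there first and the loop retries.
		 */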
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

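	/* An ip of zero means the whole probe goes away; free the mapper. */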
	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* We register both traceon and traceoff with this callback. */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

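/*
 * The commands registered here are used through the set_ftrace_filter
 * tracefs file, e.g. (illustrative):
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'vfs_read:stacktrace:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * An optional :count limits how many times a probe fires; a leading '!'
 * removes a previously installed probe.
 */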
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}