/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

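/*
 * Allocate the per-instance ftrace_ops for a trace_array and point it
 * back at the array via ops->private. It is freed again in
 * ftrace_destroy_function_files().
 */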
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

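/*
 * Called when an instance is created: give the instance its own ops
 * and its own set_ftrace_filter/set_ftrace_notrace files.
 */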
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;
	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

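/*
 * The callback run for every traced function. It must guard against
 * recursion (the code below is itself traceable) and stay quiet while
 * the per-cpu data is disabled.
 */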
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

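/*
 * Like function_trace_call() above, but also records a stack trace
 * with each function entry. Recursion is blocked with the per-cpu
 * disabled counter instead of the recursion bits.
 */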
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
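
/*
 * From user space (paths assume tracefs is mounted at
 * /sys/kernel/debug/tracing) the stack variant is enabled with:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */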

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set)
			tr->ops->func = function_stack_trace_call;
		else
			tr->ops->func = function_trace_call;

		register_ftrace_function(tr->ops);

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, true, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, false, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

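/*
 * Lazily allocate the ip -> counter mapper shared by the counted
 * probe variants, then seed this ip's counter from init_data.
 */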
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

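/*
 * Common registration for the probe commands above. The syntax, as
 * written to set_ftrace_filter, is:
 *
 *   <function-glob>:<command>[:<count>]
 *
 * e.g. "schedule:traceoff:3" turns tracing off the first three times
 * schedule() is hit, and "!schedule:traceoff" removes the probe again.
 */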
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}