// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op,
                                     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {

        TRACE_FUNC_NO_OPTS              = 0x0, /* No flags set. */
        TRACE_FUNC_OPT_STACK            = 0x1,
        TRACE_FUNC_OPT_NO_REPEATS       = 0x2,

        /* Update this to next highest bit. */
        TRACE_FUNC_OPT_HIGHEST_BIT      = 0x4
};

#define TRACE_FUNC_OPT_MASK     (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
51

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        /* The top level array uses the "global_ops" */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;

        return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
        kfree(tr->ops);
        tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        if (!tr->ops)
                return -EINVAL;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        ftrace_free_ftrace_ops(tr);
}

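/* Map the tracer option bits to the matching function trace callback. */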
static ftrace_func_t select_trace_function(u32 flags_val)
{
        switch (flags_val & TRACE_FUNC_OPT_MASK) {
        case TRACE_FUNC_NO_OPTS:
                return function_trace_call;
        case TRACE_FUNC_OPT_STACK:
                return function_stack_trace_call;
        case TRACE_FUNC_OPT_NO_REPEATS:
                return function_no_repeats_trace_call;
        case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
                return function_stack_no_repeats_trace_call;
        default:
                return NULL;
        }
}

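/*
 * Lazily allocate the per-CPU buffer that tracks the last traced function
 * when the "func-no-repeats" option is requested. Returns false only if
 * that allocation fails.
 */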
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
        if (!tr->last_func_repeats &&
            (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
                tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
                if (!tr->last_func_repeats)
                        return false;
        }

        return true;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;
        /*
         * Instance trace_arrays get their ops allocated
         * at instance creation. Unless it failed
         * the allocation.
         */
        if (!tr->ops)
                return -ENOMEM;

        func = select_trace_function(func_flags.val);
        if (!func)
                return -EINVAL;

        if (!handle_func_repeats(tr, func_flags.val))
                return -ENOMEM;

        ftrace_init_array_ops(tr, func);

        tr->array_buffer.cpu = raw_smp_processor_id();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
        int cpu;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        trace_ctx = tracing_gen_ctx_dec();

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (!atomic_read(&data->disabled))
                trace_function(tr, ip, parent_ip, trace_ctx);

        ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        unsigned int trace_ctx;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                trace_function(tr, ip, parent_ip, trace_ctx);
                __trace_stack(tr, trace_ctx, STACK_SKIP);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

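/*
 * Return true if this call repeats the previously traced ip/parent_ip pair.
 * In that case only the timestamp and the repeat counter are updated and no
 * new event needs to be written to the ring buffer.
 */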
static inline bool is_repeat_check(struct trace_array *tr,
                                   struct trace_func_repeats *last_info,
                                   unsigned long ip, unsigned long parent_ip)
{
        if (last_info->ip == ip &&
            last_info->parent_ip == parent_ip &&
            last_info->count < U16_MAX) {
                last_info->ts_last_call =
                        ring_buffer_time_stamp(tr->array_buffer.buffer);
                last_info->count++;
                return true;
        }

        return false;
}

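/*
 * Flush a pending "last function repeated N times" event, if one was
 * accumulated, and remember the current ip/parent_ip pair for the next
 * comparison.
 */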
static inline void process_repeats(struct trace_array *tr,
                                   unsigned long ip, unsigned long parent_ip,
                                   struct trace_func_repeats *last_info,
                                   unsigned int trace_ctx)
{
        if (last_info->count) {
                trace_last_func_repeats(tr, last_info, trace_ctx);
                last_info->count = 0;
        }

        last_info->ip = ip;
        last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op,
                               struct ftrace_regs *fregs)
{
        struct trace_func_repeats *last_info;
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
        int cpu;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (atomic_read(&data->disabled))
                goto out;

        /*
         * An interrupt may happen at any place here. But as far as I can see,
         * the only damage that this can cause is to mess up the repetition
         * counter without valuable data being lost.
         * TODO: think about a solution that is better than just hoping to be
         * lucky.
         */
        last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
        if (is_repeat_check(tr, last_info, ip, parent_ip))
                goto out;

        trace_ctx = tracing_gen_ctx_dec();
        process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

        trace_function(tr, ip, parent_ip, trace_ctx);

out:
        ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op,
                                     struct ftrace_regs *fregs)
{
        struct trace_func_repeats *last_info;
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        unsigned int trace_ctx;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
                if (is_repeat_check(tr, last_info, ip, parent_ip))
                        goto out;

                trace_ctx = tracing_gen_ctx_flags(flags);
                process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

                trace_function(tr, ip, parent_ip, trace_ctx);
                __trace_stack(tr, trace_ctx, STACK_SKIP);
        }

out:
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

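/*
 * Called when one of the options above is toggled through the tracer's
 * "options" directory. Swap in the matching callback and re-register the
 * ftrace ops so the change takes effect.
 */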
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        ftrace_func_t func;
        u32 new_flags;

        /* Do nothing if already set. */
        if (!!set == !!(func_flags.val & bit))
                return 0;

        /* We can change this flag only when not running. */
        if (tr->current_trace != &function_trace)
                return 0;

        new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
        func = select_trace_function(new_flags);
        if (!func)
                return -EINVAL;

        /* Check if there's anything to change. */
        if (tr->ops->func == func)
                return 0;

        if (!handle_func_repeats(tr, new_flags))
                return -ENOMEM;

        unregister_ftrace_function(tr->ops);
        tr->ops->func = func;
        register_ftrace_function(tr->ops);

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

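/*
 * The probes below back the function-triggered commands that can be set
 * through set_ftrace_filter, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:stacktrace:5' > set_ftrace_filter
 *
 * An optional ":<count>" limits how many times the command fires.
 */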
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
                                 unsigned long ip,
                                 struct trace_array *tr, bool on,
                                 void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter by
         * one minus the old counter. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and would not do anything if the new
         * counter is seen.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        old_count = *count;

        if (old_count <= 0)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracer_tracing_is_on(tr))
                return;

        if (on)
                tracer_tracing_on(tr);
        else
                tracer_tracing_off(tr);

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
{
        update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
               struct trace_array *tr, struct ftrace_probe_ops *ops,
               void *data)
{
        if (tracer_tracing_is_on(tr))
                return;

        tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
                struct trace_array *tr, struct ftrace_probe_ops *ops,
                void *data)
{
        if (!tracer_tracing_is_on(tr))
                return;

        tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
        unsigned int trace_ctx;

        trace_ctx = tracing_gen_ctx();

        __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
                        struct trace_array *tr, struct ftrace_probe_ops *ops,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;
        long new_count;

        if (!tracing_is_on())
                return;

        /* unlimited? */
        if (!mapper) {
                trace_stack(tr);
                return;
        }

        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                old_count = *count;

                if (!old_count)
                        return;

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_stack(tr);

                if (!tracing_is_on())
                        return;

        } while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count) {
                if (*count <= 0)
                        return 0;
                (*count)--;
        }

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, struct ftrace_probe_ops *ops,
                   void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count)
                seq_printf(m, ":count=%ld\n", *count);
        else
                seq_puts(m, ":unlimited\n");

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops,
                     void *data)
{
        return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, ops, data);
}

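/*
 * Allocate the ip -> count mapper the first time a counted probe is set up
 * and record the initial count for this ip.
 */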
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

        if (!ip) {
                free_ftrace_func_mapper(mapper, NULL);
                return;
        }

        ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func           = ftrace_traceon_count,
        .print          = ftrace_traceon_print,
        .init           = ftrace_count_init,
        .free           = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func           = ftrace_traceoff_count,
        .print          = ftrace_traceoff_print,
        .init           = ftrace_count_init,
        .free           = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func           = ftrace_stacktrace_count,
        .print          = ftrace_stacktrace_print,
        .init           = ftrace_count_init,
        .free           = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func           = ftrace_dump_probe,
        .print          = ftrace_dump_print,
        .init           = ftrace_count_init,
        .free           = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func           = ftrace_cpudump_probe,
        .print          = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func           = ftrace_traceon,
        .print          = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func           = ftrace_traceoff,
        .print          = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func           = ftrace_stacktrace,
        .print          = ftrace_stacktrace_print,
};

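/*
 * Common handler for the probe commands above: "glob" is the function name
 * pattern (a leading '!' removes an existing probe) and "param" is the
 * optional trigger count that follows the command name.
 */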
static int
ftrace_trace_probe_callback(struct trace_array *tr,
                            struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return unregister_ftrace_function_probe_func(glob+1, tr, ops);

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, tr, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name           = "traceon",
        .func           = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name           = "traceoff",
        .func           = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name           = "stacktrace",
        .func           = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name           = "dump",
        .func           = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name           = "cpudump",
        .func           = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}