xref: /openbmc/linux/kernel/trace/fgraph.c (revision 715f23b6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/suspend.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13 
14 #include <trace/events/sched.h>
15 
16 #include "ftrace_internal.h"
17 
18 #ifdef CONFIG_DYNAMIC_FTRACE
19 #define ASSIGN_OPS_HASH(opsname, val) \
20 	.func_hash		= val, \
21 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
22 #else
23 #define ASSIGN_OPS_HASH(opsname, val)
24 #endif
25 
26 static bool kill_ftrace_graph;
27 int ftrace_graph_active;
28 
29 /* Enabled by default (can be cleared by function_graph tracer flags) */
30 static bool fgraph_sleep_time = true;
31 
32 /**
33  * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
34  *
35  * ftrace_graph_stop() is called when a severe error is detected in
36  * the function graph tracing. This function is called by the critical
37  * paths of function graph to keep those paths from doing any more harm.
38  */
39 bool ftrace_graph_is_dead(void)
40 {
41 	return kill_ftrace_graph;
42 }
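
/*
 * Illustrative sketch (not part of this revision): critical paths that
 * touch the return stack are expected to check this and bail out early
 * once the graph tracer has been declared dead, e.g.:
 *
 *	if (unlikely(ftrace_graph_is_dead()))
 *		return;
 */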
43 
44 /**
45  * ftrace_graph_stop - set to permanently disable function graph tracing
46  *
47  * In case of an error in function graph tracing, this is called
48  * to try to keep function graph tracing from causing any more harm.
49  * Usually this is pretty severe and this is called to try to at least
50  * get a warning out to the user.
51  */
52 void ftrace_graph_stop(void)
53 {
54 	kill_ftrace_graph = true;
55 }
56 
57 /* Add a function return address to the trace stack on thread info. */
58 static int
59 ftrace_push_return_trace(unsigned long ret, unsigned long func,
60 			 unsigned long frame_pointer, unsigned long *retp)
61 {
62 	unsigned long long calltime;
63 	int index;
64 
65 	if (unlikely(ftrace_graph_is_dead()))
66 		return -EBUSY;
67 
68 	if (!current->ret_stack)
69 		return -EBUSY;
70 
71 	/*
72 	 * We must make sure the ret_stack is tested before we read
73 	 * anything else.
74 	 */
75 	smp_rmb();
76 
77 	/* The return trace stack is full */
78 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
79 		atomic_inc(&current->trace_overrun);
80 		return -EBUSY;
81 	}
82 
83 	calltime = trace_clock_local();
84 
85 	index = ++current->curr_ret_stack;
86 	barrier();
87 	current->ret_stack[index].ret = ret;
88 	current->ret_stack[index].func = func;
89 	current->ret_stack[index].calltime = calltime;
90 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
91 	current->ret_stack[index].fp = frame_pointer;
92 #endif
93 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
94 	current->ret_stack[index].retp = retp;
95 #endif
96 	return 0;
97 }
98 
99 int function_graph_enter(unsigned long ret, unsigned long func,
100 			 unsigned long frame_pointer, unsigned long *retp)
101 {
102 	struct ftrace_graph_ent trace;
103 
104 	/*
105 	 * Skip graph tracing if the return location is served by a direct trampoline,
106 	 * since the call sequence and return addresses are no longer predictable.
107 	 * Ex: a BPF trampoline may call the original function and may skip a frame
108 	 * depending on the type of BPF programs attached.
109 	 */
110 	if (ftrace_direct_func_count &&
111 	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
112 		return -EBUSY;
113 	trace.func = func;
114 	trace.depth = ++current->curr_ret_depth;
115 
116 	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
117 		goto out;
118 
119 	/* Only trace if the calling function expects to */
120 	if (!ftrace_graph_entry(&trace))
121 		goto out_ret;
122 
123 	return 0;
124  out_ret:
125 	current->curr_ret_stack--;
126  out:
127 	current->curr_ret_depth--;
128 	return -EBUSY;
129 }
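
/*
 * Illustrative sketch (an assumption modeled on typical arch ftrace code,
 * not part of this file): an architecture's prepare_ftrace_return() passes
 * the location of the return address to function_graph_enter() and only
 * redirects the return to return_to_handler on success:
 *
 *	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long old = *parent;
 *
 *		if (!function_graph_enter(old, self_addr, frame_pointer, parent))
 *			*parent = (unsigned long)&return_to_handler;
 *	}
 */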
130 
131 /* Retrieve a function return address from the trace stack on thread info. */
132 static void
133 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
134 			unsigned long frame_pointer)
135 {
136 	int index;
137 
138 	index = current->curr_ret_stack;
139 
140 	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
141 		ftrace_graph_stop();
142 		WARN_ON(1);
143 		/* Might as well panic, otherwise we have nowhere to go */
144 		*ret = (unsigned long)panic;
145 		return;
146 	}
147 
148 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
149 	/*
150 	 * The arch may choose to record the frame pointer used
151 	 * and check it here to make sure that it is what we expect it
152 	 * to be. If gcc does not set the place holder of the return
153 	 * address in the frame pointer, and does a copy instead, then
154 	 * the function graph trace will fail. This test detects this
155 	 * case.
156 	 *
157 	 * Currently, x86_32 optimized for size (-Os) makes the latest
158 	 * gcc do the above.
159 	 *
160 	 * Note, -mfentry does not use frame pointers, and this test
161 	 * is not needed if CC_USING_FENTRY is set.
162 	 */
163 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
164 		ftrace_graph_stop();
165 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
166 		     "  from func %ps return to %lx\n",
167 		     current->ret_stack[index].fp,
168 		     frame_pointer,
169 		     (void *)current->ret_stack[index].func,
170 		     current->ret_stack[index].ret);
171 		*ret = (unsigned long)panic;
172 		return;
173 	}
174 #endif
175 
176 	*ret = current->ret_stack[index].ret;
177 	trace->func = current->ret_stack[index].func;
178 	trace->calltime = current->ret_stack[index].calltime;
179 	trace->overrun = atomic_read(&current->trace_overrun);
180 	trace->depth = current->curr_ret_depth--;
181 	/*
182 	 * We still want to trace interrupts coming in if
183 	 * max_depth is set to 1. Make sure the decrement is
184 	 * seen before ftrace_graph_return.
185 	 */
186 	barrier();
187 }
188 
189 /*
190  * Hibernation protection.
191  * The state of the current task is too unstable during
192  * suspend/restore to disk. We want to protect against that.
193  */
194 static int
195 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
196 							void *unused)
197 {
198 	switch (state) {
199 	case PM_HIBERNATION_PREPARE:
200 		pause_graph_tracing();
201 		break;
202 
203 	case PM_POST_HIBERNATION:
204 		unpause_graph_tracing();
205 		break;
206 	}
207 	return NOTIFY_DONE;
208 }
209 
210 static struct notifier_block ftrace_suspend_notifier = {
211 	.notifier_call = ftrace_suspend_notifier_call,
212 };
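
/*
 * Illustrative sketch (assumption): other code that must keep the graph
 * tracer away from the current task can bracket an unstable region the
 * same way the hibernation notifier does:
 *
 *	pause_graph_tracing();
 *	// ... region where the task's state cannot be trusted ...
 *	unpause_graph_tracing();
 */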
213 
214 /*
215  * Send the trace to the ring-buffer.
216  * @return the original return address.
217  */
218 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
219 {
220 	struct ftrace_graph_ret trace;
221 	unsigned long ret;
222 
223 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
224 	trace.rettime = trace_clock_local();
225 	ftrace_graph_return(&trace);
226 	/*
227 	 * The ftrace_graph_return() may still access the current
228 	 * ret_stack structure; we need to make sure the update of
229 	 * curr_ret_stack is after that.
230 	 */
231 	barrier();
232 	current->curr_ret_stack--;
233 
234 	if (unlikely(!ret)) {
235 		ftrace_graph_stop();
236 		WARN_ON(1);
237 		/* Might as well panic. What else to do? */
238 		ret = (unsigned long)panic;
239 	}
240 
241 	return ret;
242 }
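
/*
 * Illustrative pseudocode (assumption, not part of this file): the
 * architecture's return_to_handler trampoline, written in assembly,
 * saves the live return-value registers, asks ftrace_return_to_handler()
 * for the original return address and then jumps to it:
 *
 *	return_to_handler:
 *		save return registers
 *		ret = ftrace_return_to_handler(frame_pointer)
 *		restore return registers
 *		jump to ret
 */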
243 
244 /**
245  * ftrace_graph_get_ret_stack - return the entry of the shadow stack
246  * @task: The task to read the shadow stack from
247  * @idx: Index down the shadow stack
248  *
249  * Return the ret_stack entry on the shadow stack of @task at
250  * call graph index @idx, starting with zero. If @idx is zero, it
251  * will return the last saved ret_stack entry. If it is greater than
252  * zero, it will return the corresponding ret_stack for the depth
253  * of saved return addresses.
254  */
255 struct ftrace_ret_stack *
256 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
257 {
258 	idx = task->curr_ret_stack - idx;
259 
260 	if (idx >= 0 && idx <= task->curr_ret_stack)
261 		return &task->ret_stack[idx];
262 
263 	return NULL;
264 }
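
/*
 * Example (illustrative sketch, assumed caller): arch or debug code can
 * walk the shadow stack to recover the original return addresses:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(current, i++)))
 *		pr_info("saved return: %pS\n", (void *)ret_stack->ret);
 */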
265 
266 /**
267  * ftrace_graph_ret_addr - convert a potentially modified stack return address
268  *			   to its original value
269  *
270  * This function can be called by stack unwinding code to convert a found stack
271  * return address ('ret') to its original value, in case the function graph
272  * tracer has modified it to be 'return_to_handler'.  If the address hasn't
273  * been modified, the unchanged value of 'ret' is returned.
274  *
275  * 'idx' is a state variable which should be initialized by the caller to zero
276  * before the first call.
277  *
278  * 'retp' is a pointer to the return address on the stack.  It's ignored if
279  * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
280  */
281 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
282 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
283 				    unsigned long ret, unsigned long *retp)
284 {
285 	int index = task->curr_ret_stack;
286 	int i;
287 
288 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
289 		return ret;
290 
291 	if (index < 0)
292 		return ret;
293 
294 	for (i = 0; i <= index; i++)
295 		if (task->ret_stack[i].retp == retp)
296 			return task->ret_stack[i].ret;
297 
298 	return ret;
299 }
300 #else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
301 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
302 				    unsigned long ret, unsigned long *retp)
303 {
304 	int task_idx;
305 
306 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
307 		return ret;
308 
309 	task_idx = task->curr_ret_stack;
310 
311 	if (!task->ret_stack || task_idx < *idx)
312 		return ret;
313 
314 	task_idx -= *idx;
315 	(*idx)++;
316 
317 	return task->ret_stack[task_idx].ret;
318 }
319 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
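
/*
 * Illustrative sketch (assumption, modeled on arch stack unwinders):
 * while walking a task's stack, each candidate return address is
 * filtered through ftrace_graph_ret_addr() so that return_to_handler
 * never shows up in the unwind; the graph_idx state persists across calls:
 *
 *	int graph_idx = 0;
 *
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_ptr);
 */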
320 
321 static struct ftrace_ops graph_ops = {
322 	.func			= ftrace_stub,
323 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
324 				   FTRACE_OPS_FL_INITIALIZED |
325 				   FTRACE_OPS_FL_PID |
326 				   FTRACE_OPS_FL_STUB,
327 #ifdef FTRACE_GRAPH_TRAMP_ADDR
328 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
329 	/* trampoline_size is only needed for dynamically allocated tramps */
330 #endif
331 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
332 };
333 
334 void ftrace_graph_sleep_time_control(bool enable)
335 {
336 	fgraph_sleep_time = enable;
337 }
338 
339 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
340 {
341 	return 0;
342 }
343 
344 /*
345  * Simply points to ftrace_stub, but with the proper protocol.
346  * Defined by the linker script in linux/vmlinux.lds.h
347  */
348 extern void ftrace_stub_graph(struct ftrace_graph_ret *);
349 
350 /* The callbacks that hook a function */
351 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
352 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
353 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
354 
355 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
356 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
357 {
358 	int i;
359 	int ret = 0;
360 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
361 	struct task_struct *g, *t;
362 
363 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
364 		ret_stack_list[i] =
365 			kmalloc_array(FTRACE_RETFUNC_DEPTH,
366 				      sizeof(struct ftrace_ret_stack),
367 				      GFP_KERNEL);
368 		if (!ret_stack_list[i]) {
369 			start = 0;
370 			end = i;
371 			ret = -ENOMEM;
372 			goto free;
373 		}
374 	}
375 
376 	read_lock(&tasklist_lock);
377 	do_each_thread(g, t) {
378 		if (start == end) {
379 			ret = -EAGAIN;
380 			goto unlock;
381 		}
382 
383 		if (t->ret_stack == NULL) {
384 			atomic_set(&t->tracing_graph_pause, 0);
385 			atomic_set(&t->trace_overrun, 0);
386 			t->curr_ret_stack = -1;
387 			t->curr_ret_depth = -1;
388 			/* Make sure the tasks see the -1 first: */
389 			smp_wmb();
390 			t->ret_stack = ret_stack_list[start++];
391 		}
392 	} while_each_thread(g, t);
393 
394 unlock:
395 	read_unlock(&tasklist_lock);
396 free:
397 	for (i = start; i < end; i++)
398 		kfree(ret_stack_list[i]);
399 	return ret;
400 }
401 
402 static void
403 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
404 			struct task_struct *prev, struct task_struct *next)
405 {
406 	unsigned long long timestamp;
407 	int index;
408 
409 	/*
410 	 * Does the user want to count the time a function was asleep?
411 	 * If so, do not update the time stamps.
412 	 */
413 	if (fgraph_sleep_time)
414 		return;
415 
416 	timestamp = trace_clock_local();
417 
418 	prev->ftrace_timestamp = timestamp;
419 
420 	/* only process tasks that we timestamped */
421 	if (!next->ftrace_timestamp)
422 		return;
423 
424 	/*
425 	 * Update all the counters in next to make up for the
426 	 * time next was sleeping.
427 	 */
428 	timestamp -= next->ftrace_timestamp;
429 
430 	for (index = next->curr_ret_stack; index >= 0; index--)
431 		next->ret_stack[index].calltime += timestamp;
432 }
433 
434 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
435 {
436 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
437 		return 0;
438 	return __ftrace_graph_entry(trace);
439 }
440 
441 /*
442  * The function graph tracer should only trace the functions defined
443  * by set_ftrace_filter and set_ftrace_notrace. If another function
444  * tracer ops is registered, the graph tracer must test each
445  * function against the global ops, and not just trace any function
446  * that any ftrace_ops has registered.
447  */
448 void update_function_graph_func(void)
449 {
450 	struct ftrace_ops *op;
451 	bool do_test = false;
452 
453 	/*
454 	 * The graph and global ops share the same set of functions
455 	 * to test. If any other ops is on the list, then
456 	 * the graph tracing needs to test if it's the function
457 	 * it should call.
458 	 */
459 	do_for_each_ftrace_op(op, ftrace_ops_list) {
460 		if (op != &global_ops && op != &graph_ops &&
461 		    op != &ftrace_list_end) {
462 			do_test = true;
463 			/* in double loop, break out with goto */
464 			goto out;
465 		}
466 	} while_for_each_ftrace_op(op);
467  out:
468 	if (do_test)
469 		ftrace_graph_entry = ftrace_graph_entry_test;
470 	else
471 		ftrace_graph_entry = __ftrace_graph_entry;
472 }
473 
474 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
475 
476 static void
477 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
478 {
479 	atomic_set(&t->tracing_graph_pause, 0);
480 	atomic_set(&t->trace_overrun, 0);
481 	t->ftrace_timestamp = 0;
482 	/* make curr_ret_stack visible before we add the ret_stack */
483 	smp_wmb();
484 	t->ret_stack = ret_stack;
485 }
486 
487 /*
488  * Allocate a return stack for the idle task. May be the first
489  * time through, or it may be done by CPU hotplug online.
490  */
491 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
492 {
493 	t->curr_ret_stack = -1;
494 	t->curr_ret_depth = -1;
495 	/*
496 	 * The idle task has no parent, it either has its own
497 	 * stack or no stack at all.
498 	 */
499 	if (t->ret_stack)
500 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
501 
502 	if (ftrace_graph_active) {
503 		struct ftrace_ret_stack *ret_stack;
504 
505 		ret_stack = per_cpu(idle_ret_stack, cpu);
506 		if (!ret_stack) {
507 			ret_stack =
508 				kmalloc_array(FTRACE_RETFUNC_DEPTH,
509 					      sizeof(struct ftrace_ret_stack),
510 					      GFP_KERNEL);
511 			if (!ret_stack)
512 				return;
513 			per_cpu(idle_ret_stack, cpu) = ret_stack;
514 		}
515 		graph_init_task(t, ret_stack);
516 	}
517 }
518 
519 /* Allocate a return stack for a newly created task */
520 void ftrace_graph_init_task(struct task_struct *t)
521 {
522 	/* Make sure we do not use the parent ret_stack */
523 	t->ret_stack = NULL;
524 	t->curr_ret_stack = -1;
525 	t->curr_ret_depth = -1;
526 
527 	if (ftrace_graph_active) {
528 		struct ftrace_ret_stack *ret_stack;
529 
530 		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
531 					  sizeof(struct ftrace_ret_stack),
532 					  GFP_KERNEL);
533 		if (!ret_stack)
534 			return;
535 		graph_init_task(t, ret_stack);
536 	}
537 }
538 
539 void ftrace_graph_exit_task(struct task_struct *t)
540 {
541 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
542 
543 	t->ret_stack = NULL;
544 	/* NULL must become visible to IRQs before we free it: */
545 	barrier();
546 
547 	kfree(ret_stack);
548 }
549 
550 /* Allocate a return stack for each task */
551 static int start_graph_tracing(void)
552 {
553 	struct ftrace_ret_stack **ret_stack_list;
554 	int ret, cpu;
555 
556 	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
557 				       sizeof(struct ftrace_ret_stack *),
558 				       GFP_KERNEL);
559 
560 	if (!ret_stack_list)
561 		return -ENOMEM;
562 
563 	/* The cpu_boot init_task->ret_stack will never be freed */
564 	for_each_online_cpu(cpu) {
565 		if (!idle_task(cpu)->ret_stack)
566 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
567 	}
568 
569 	do {
570 		ret = alloc_retstack_tasklist(ret_stack_list);
571 	} while (ret == -EAGAIN);
572 
573 	if (!ret) {
574 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
575 		if (ret)
576 			pr_info("ftrace_graph: Couldn't activate tracepoint"
577 				" probe to kernel_sched_switch\n");
578 	}
579 
580 	kfree(ret_stack_list);
581 	return ret;
582 }
583 
584 int register_ftrace_graph(struct fgraph_ops *gops)
585 {
586 	int ret = 0;
587 
588 	mutex_lock(&ftrace_lock);
589 
590 	/* we currently allow only one tracer registered at a time */
591 	if (ftrace_graph_active) {
592 		ret = -EBUSY;
593 		goto out;
594 	}
595 
596 	register_pm_notifier(&ftrace_suspend_notifier);
597 
598 	ftrace_graph_active++;
599 	ret = start_graph_tracing();
600 	if (ret) {
601 		ftrace_graph_active--;
602 		goto out;
603 	}
604 
605 	ftrace_graph_return = gops->retfunc;
606 
607 	/*
608 	 * Update the indirect function to the entryfunc, and the
609 	 * function that gets called to the entry_test first. Then
610 	 * call the update fgraph entry function to determine if
611 	 * the entryfunc should be called directly or not.
612 	 */
613 	__ftrace_graph_entry = gops->entryfunc;
614 	ftrace_graph_entry = ftrace_graph_entry_test;
615 	update_function_graph_func();
616 
617 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
618 out:
619 	mutex_unlock(&ftrace_lock);
620 	return ret;
621 }
622 
623 void unregister_ftrace_graph(struct fgraph_ops *gops)
624 {
625 	mutex_lock(&ftrace_lock);
626 
627 	if (unlikely(!ftrace_graph_active))
628 		goto out;
629 
630 	ftrace_graph_active--;
631 	ftrace_graph_return = ftrace_stub_graph;
632 	ftrace_graph_entry = ftrace_graph_entry_stub;
633 	__ftrace_graph_entry = ftrace_graph_entry_stub;
634 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
635 	unregister_pm_notifier(&ftrace_suspend_notifier);
636 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
637 
638  out:
639 	mutex_unlock(&ftrace_lock);
640 }
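
/*
 * Example (illustrative sketch, hypothetical tracer): a client registers
 * its entry/return callbacks through a struct fgraph_ops:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */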
641