xref: /openbmc/linux/kernel/trace/fgraph.c (revision b4bc93bd76d4da32600795cd323c971f00a2e788)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/suspend.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13 
14 #include <trace/events/sched.h>
15 
16 #include "ftrace_internal.h"
17 
18 #ifdef CONFIG_DYNAMIC_FTRACE
19 #define ASSIGN_OPS_HASH(opsname, val) \
20 	.func_hash		= val, \
21 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
22 #else
23 #define ASSIGN_OPS_HASH(opsname, val)
24 #endif
25 
26 static bool kill_ftrace_graph;
27 int ftrace_graph_active;
28 
29 /* Enabled by default (can be cleared by function_graph tracer flags) */
30 static bool fgraph_sleep_time = true;
31 
32 /**
33  * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
34  *
35  * ftrace_graph_stop() is called when a severe error is detected in
36  * function graph tracing. This function is called by the critical
37  * paths of function graph tracing to keep those paths from doing any more harm.
38  */
39 bool ftrace_graph_is_dead(void)
40 {
41 	return kill_ftrace_graph;
42 }
43 
44 /**
45  * ftrace_graph_stop - set to permanently disable function graph tracing
46  *
47  * In case of an error in function graph tracing, this is called
48  * to try to keep function graph tracing from causing any more harm.
49  * Usually the error is pretty severe and this is called to try to at least
50  * get a warning out to the user.
51  */
52 void ftrace_graph_stop(void)
53 {
54 	kill_ftrace_graph = true;
55 }
56 
57 /* Add a function return address to the trace stack on thread info. */
58 static int
59 ftrace_push_return_trace(unsigned long ret, unsigned long func,
60 			 unsigned long frame_pointer, unsigned long *retp)
61 {
62 	unsigned long long calltime;
63 	int index;
64 
65 	if (unlikely(ftrace_graph_is_dead()))
66 		return -EBUSY;
67 
68 	if (!current->ret_stack)
69 		return -EBUSY;
70 
71 	/*
72 	 * We must make sure the ret_stack is tested before we read
73 	 * anything else.
74 	 */
75 	smp_rmb();
76 
77 	/* The return trace stack is full */
78 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
79 		atomic_inc(&current->trace_overrun);
80 		return -EBUSY;
81 	}
82 
83 	calltime = trace_clock_local();
84 
85 	index = ++current->curr_ret_stack;
86 	barrier();
87 	current->ret_stack[index].ret = ret;
88 	current->ret_stack[index].func = func;
89 	current->ret_stack[index].calltime = calltime;
90 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
91 	current->ret_stack[index].fp = frame_pointer;
92 #endif
93 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
94 	current->ret_stack[index].retp = retp;
95 #endif
96 	return 0;
97 }
98 
99 /*
100  * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
101  * functions. But those archs currently don't support direct functions
102  * anyway, and ftrace_find_rec_direct() is just a stub for them.
103  * Define MCOUNT_INSN_SIZE to keep those archs compiling.
104  */
105 #ifndef MCOUNT_INSN_SIZE
106 /* Make sure this only works without direct calls */
107 # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
108 #  error MCOUNT_INSN_SIZE not defined with direct calls enabled
109 # endif
110 # define MCOUNT_INSN_SIZE 0
111 #endif
112 
113 int function_graph_enter(unsigned long ret, unsigned long func,
114 			 unsigned long frame_pointer, unsigned long *retp)
115 {
116 	struct ftrace_graph_ent trace;
117 
118 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
119 	/*
120 	 * Skip graph tracing if the return location is served by a direct trampoline,
121 	 * since the call sequence and return addresses are unpredictable anyway.
122 	 * Ex: a BPF trampoline may call the original function and may skip the frame,
123 	 * depending on the type of BPF programs attached.
124 	 */
125 	if (ftrace_direct_func_count &&
126 	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
127 		return -EBUSY;
128 #endif
129 	trace.func = func;
130 	trace.depth = ++current->curr_ret_depth;
131 
132 	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
133 		goto out;
134 
135 	/* Only trace if the calling function expects to */
136 	if (!ftrace_graph_entry(&trace))
137 		goto out_ret;
138 
139 	return 0;
140  out_ret:
141 	current->curr_ret_stack--;
142  out:
143 	current->curr_ret_depth--;
144 	return -EBUSY;
145 }
146 
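/*
 * A rough sketch of how an architecture's entry code is expected to use
 * function_graph_enter(), modeled loosely on x86's prepare_ftrace_return();
 * the exact checks and register handling differ per architecture:
 *
 *	void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long return_hooker = (unsigned long)&return_to_handler;
 *
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *
 *		if (unlikely(atomic_read(&current->tracing_graph_pause)))
 *			return;
 *
 *		if (!function_graph_enter(*parent, ip, frame_pointer, parent))
 *			*parent = return_hooker;
 *	}
 */
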
147 /* Retrieve a function return address from the trace stack on thread info. */
148 static void
149 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
150 			unsigned long frame_pointer)
151 {
152 	int index;
153 
154 	index = current->curr_ret_stack;
155 
156 	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
157 		ftrace_graph_stop();
158 		WARN_ON(1);
159 		/* Might as well panic, otherwise we have nowhere to go */
160 		*ret = (unsigned long)panic;
161 		return;
162 	}
163 
164 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
165 	/*
166 	 * The arch may choose to record the frame pointer used
167 	 * and check it here to make sure that it is what we expect it
168 	 * to be. If gcc does not set the placeholder of the return
169 	 * address in the frame pointer, and does a copy instead, then
170 	 * the function graph trace will fail. This test detects this
171 	 * case.
172 	 *
173 	 * Currently, x86_32 optimized for size (-Os) makes the latest
174 	 * gcc do the above.
175 	 *
176 	 * Note, -mfentry does not use frame pointers, and this test
177 	 * is not needed if CC_USING_FENTRY is set.
178 	 */
179 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
180 		ftrace_graph_stop();
181 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
182 		     "  from func %ps return to %lx\n",
183 		     current->ret_stack[index].fp,
184 		     frame_pointer,
185 		     (void *)current->ret_stack[index].func,
186 		     current->ret_stack[index].ret);
187 		*ret = (unsigned long)panic;
188 		return;
189 	}
190 #endif
191 
192 	*ret = current->ret_stack[index].ret;
193 	trace->func = current->ret_stack[index].func;
194 	trace->calltime = current->ret_stack[index].calltime;
195 	trace->overrun = atomic_read(&current->trace_overrun);
196 	trace->depth = current->curr_ret_depth--;
197 	/*
198 	 * We still want to trace interrupts coming in if
199 	 * max_depth is set to 1. Make sure the decrement is
200 	 * seen before ftrace_graph_return.
201 	 */
202 	barrier();
203 }
204 
205 /*
206  * Hibernation protection.
207  * The state of the current task is too unstable during
208  * suspend/restore to disk. We want to protect against that.
209  */
210 static int
211 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
212 							void *unused)
213 {
214 	switch (state) {
215 	case PM_HIBERNATION_PREPARE:
216 		pause_graph_tracing();
217 		break;
218 
219 	case PM_POST_HIBERNATION:
220 		unpause_graph_tracing();
221 		break;
222 	}
223 	return NOTIFY_DONE;
224 }
225 
226 static struct notifier_block ftrace_suspend_notifier = {
227 	.notifier_call = ftrace_suspend_notifier_call,
228 };
229 
230 /*
231  * Send the trace to the ring-buffer.
232  * @return the original return address.
233  */
234 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
235 {
236 	struct ftrace_graph_ret trace;
237 	unsigned long ret;
238 
239 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
240 	trace.rettime = trace_clock_local();
241 	ftrace_graph_return(&trace);
242 	/*
243 	 * The ftrace_graph_return() may still access the current
244 	 * ret_stack structure, so we need to make sure the update of
245 	 * curr_ret_stack comes after that call.
246 	 */
247 	barrier();
248 	current->curr_ret_stack--;
249 
250 	if (unlikely(!ret)) {
251 		ftrace_graph_stop();
252 		WARN_ON(1);
253 		/* Might as well panic. What else to do? */
254 		ret = (unsigned long)panic;
255 	}
256 
257 	return ret;
258 }
259 
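/*
 * The arch-provided return_to_handler trampoline (usually assembly) is
 * what calls ftrace_return_to_handler(). Roughly, it:
 *
 *	saves the live return-value registers
 *	calls ftrace_return_to_handler(frame_pointer)
 *	restores the saved registers
 *	jumps to the original return address that was handed back
 *
 * Which registers are saved and how the frame pointer is passed are
 * architecture specific.
 */
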
260 /**
261  * ftrace_graph_get_ret_stack - return the entry of the shadow stack
262  * @task: The task to read the shadow stack from
263  * @idx: Index down the shadow stack
264  *
265  * Return the ftrace_ret_stack entry on the shadow stack of @task at
266  * position @idx, counting from zero. If @idx is zero, the most
267  * recently saved ret_stack entry is returned. If @idx is greater
268  * than zero, the correspondingly older entry is returned, walking
269  * down the stack of saved return addresses.
270  */
271 struct ftrace_ret_stack *
272 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
273 {
274 	idx = task->curr_ret_stack - idx;
275 
276 	if (idx >= 0 && idx <= task->curr_ret_stack)
277 		return &task->ret_stack[idx];
278 
279 	return NULL;
280 }
281 
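/*
 * A sketch of how a caller (e.g. an arch stack dumper) might walk the
 * pending return addresses with this helper; the names are illustrative:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("pending return to %pS\n", (void *)ret_stack->ret);
 */
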
282 /**
283  * ftrace_graph_ret_addr - convert a potentially modified stack return address
284  *			   to its original value
285  *
286  * This function can be called by stack unwinding code to convert a found stack
287  * return address ('ret') to its original value, in case the function graph
288  * tracer has modified it to be 'return_to_handler'.  If the address hasn't
289  * been modified, the unchanged value of 'ret' is returned.
290  *
291  * 'idx' is a state variable which should be initialized by the caller to zero
292  * before the first call.
293  *
294  * 'retp' is a pointer to the return address on the stack.  It's ignored if
295  * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
296  */
297 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
298 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
299 				    unsigned long ret, unsigned long *retp)
300 {
301 	int index = task->curr_ret_stack;
302 	int i;
303 
304 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
305 		return ret;
306 
307 	if (index < 0)
308 		return ret;
309 
310 	for (i = 0; i <= index; i++)
311 		if (task->ret_stack[i].retp == retp)
312 			return task->ret_stack[i].ret;
313 
314 	return ret;
315 }
316 #else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
317 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
318 				    unsigned long ret, unsigned long *retp)
319 {
320 	int task_idx;
321 
322 	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
323 		return ret;
324 
325 	task_idx = task->curr_ret_stack;
326 
327 	if (!task->ret_stack || task_idx < *idx)
328 		return ret;
329 
330 	task_idx -= *idx;
331 	(*idx)++;
332 
333 	return task->ret_stack[task_idx].ret;
334 }
335 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
336 
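/*
 * A sketch of how an unwinder typically uses ftrace_graph_ret_addr():
 * one index is kept across the whole unwind and passed back in on every
 * frame, so repeated return_to_handler hits resolve to successive shadow
 * stack entries (names are illustrative):
 *
 *	int graph_idx = 0;
 *
 *	for (each frame found by the unwinder) {
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 *		...
 *	}
 */
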
337 static struct ftrace_ops graph_ops = {
338 	.func			= ftrace_graph_func,
339 	.flags			= FTRACE_OPS_FL_INITIALIZED |
340 				   FTRACE_OPS_FL_PID |
341 				   FTRACE_OPS_GRAPH_STUB,
342 #ifdef FTRACE_GRAPH_TRAMP_ADDR
343 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
344 	/* trampoline_size is only needed for dynamically allocated tramps */
345 #endif
346 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
347 };
348 
349 void ftrace_graph_sleep_time_control(bool enable)
350 {
351 	fgraph_sleep_time = enable;
352 }
353 
354 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
355 {
356 	return 0;
357 }
358 
359 /*
360  * Simply points to ftrace_stub, but with the proper protocol.
361  * Defined by the linker script in linux/vmlinux.lds.h
362  */
363 extern void ftrace_stub_graph(struct ftrace_graph_ret *);
364 
365 /* The callbacks that hook a function */
366 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
367 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
368 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
369 
370 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
371 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
372 {
373 	int i;
374 	int ret = 0;
375 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
376 	struct task_struct *g, *t;
377 
378 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
379 		ret_stack_list[i] =
380 			kmalloc_array(FTRACE_RETFUNC_DEPTH,
381 				      sizeof(struct ftrace_ret_stack),
382 				      GFP_KERNEL);
383 		if (!ret_stack_list[i]) {
384 			start = 0;
385 			end = i;
386 			ret = -ENOMEM;
387 			goto free;
388 		}
389 	}
390 
391 	rcu_read_lock();
392 	for_each_process_thread(g, t) {
393 		if (start == end) {
394 			ret = -EAGAIN;
395 			goto unlock;
396 		}
397 
398 		if (t->ret_stack == NULL) {
399 			atomic_set(&t->trace_overrun, 0);
400 			t->curr_ret_stack = -1;
401 			t->curr_ret_depth = -1;
402 			/* Make sure the tasks see the -1 first: */
403 			smp_wmb();
404 			t->ret_stack = ret_stack_list[start++];
405 		}
406 	}
407 
408 unlock:
409 	rcu_read_unlock();
410 free:
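	/* Free only the stacks that were not handed out to a task above */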
411 	for (i = start; i < end; i++)
412 		kfree(ret_stack_list[i]);
413 	return ret;
414 }
415 
416 static void
417 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
418 				unsigned int prev_state,
419 				struct task_struct *prev,
420 				struct task_struct *next)
421 {
422 	unsigned long long timestamp;
423 	int index;
424 
425 	/*
426 	 * Does the user want to count the time a function was asleep?
427 	 * If so, do not update the timestamps.
428 	 */
429 	if (fgraph_sleep_time)
430 		return;
431 
432 	timestamp = trace_clock_local();
433 
434 	prev->ftrace_timestamp = timestamp;
435 
436 	/* only process tasks that we timestamped */
437 	if (!next->ftrace_timestamp)
438 		return;
439 
440 	/*
441 	 * Update all the counters in next to make up for the
442 	 * time next was sleeping.
443 	 */
444 	timestamp -= next->ftrace_timestamp;
445 
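	/*
	 * E.g. if next was switched out for 3 ms, each pending calltime
	 * moves forward by 3 ms, so the duration eventually reported
	 * (rettime - calltime) does not include the time spent sleeping.
	 */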
446 	for (index = next->curr_ret_stack; index >= 0; index--)
447 		next->ret_stack[index].calltime += timestamp;
448 }
449 
450 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
451 {
452 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
453 		return 0;
454 	return __ftrace_graph_entry(trace);
455 }
456 
457 /*
458  * The function graph tracer should only trace the functions defined
459  * by set_ftrace_filter and set_ftrace_notrace. If another function
460  * tracer ops is registered, the graph tracer must test each
461  * function against the global ops, and not just trace any function
462  * that any other ftrace_ops has registered.
463  */
464 void update_function_graph_func(void)
465 {
466 	struct ftrace_ops *op;
467 	bool do_test = false;
468 
469 	/*
470 	 * The graph and global ops share the same set of functions
471 	 * to test. If any other ops is on the list, then
472 	 * the graph tracing needs to test if it's the function
473 	 * it should call.
474 	 */
475 	do_for_each_ftrace_op(op, ftrace_ops_list) {
476 		if (op != &global_ops && op != &graph_ops &&
477 		    op != &ftrace_list_end) {
478 			do_test = true;
479 			/* in double loop, break out with goto */
480 			goto out;
481 		}
482 	} while_for_each_ftrace_op(op);
483  out:
484 	if (do_test)
485 		ftrace_graph_entry = ftrace_graph_entry_test;
486 	else
487 		ftrace_graph_entry = __ftrace_graph_entry;
488 }
489 
490 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
491 
492 static void
493 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
494 {
495 	atomic_set(&t->trace_overrun, 0);
496 	t->ftrace_timestamp = 0;
497 	/* make curr_ret_stack visible before we add the ret_stack */
498 	smp_wmb();
499 	t->ret_stack = ret_stack;
500 }
501 
502 /*
503  * Allocate a return stack for the idle task. May be the first
504  * time through, or it may be done by CPU hotplug online.
505  */
506 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
507 {
508 	t->curr_ret_stack = -1;
509 	t->curr_ret_depth = -1;
510 	/*
511 	 * The idle task has no parent; it either has its own
512 	 * stack or no stack at all.
513 	 */
514 	if (t->ret_stack)
515 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
516 
517 	if (ftrace_graph_active) {
518 		struct ftrace_ret_stack *ret_stack;
519 
520 		ret_stack = per_cpu(idle_ret_stack, cpu);
521 		if (!ret_stack) {
522 			ret_stack =
523 				kmalloc_array(FTRACE_RETFUNC_DEPTH,
524 					      sizeof(struct ftrace_ret_stack),
525 					      GFP_KERNEL);
526 			if (!ret_stack)
527 				return;
528 			per_cpu(idle_ret_stack, cpu) = ret_stack;
529 		}
530 		graph_init_task(t, ret_stack);
531 	}
532 }
533 
534 /* Allocate a return stack for a newly created task */
535 void ftrace_graph_init_task(struct task_struct *t)
536 {
537 	/* Make sure we do not use the parent ret_stack */
538 	t->ret_stack = NULL;
539 	t->curr_ret_stack = -1;
540 	t->curr_ret_depth = -1;
541 
542 	if (ftrace_graph_active) {
543 		struct ftrace_ret_stack *ret_stack;
544 
545 		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
546 					  sizeof(struct ftrace_ret_stack),
547 					  GFP_KERNEL);
548 		if (!ret_stack)
549 			return;
550 		graph_init_task(t, ret_stack);
551 	}
552 }
553 
554 void ftrace_graph_exit_task(struct task_struct *t)
555 {
556 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
557 
558 	t->ret_stack = NULL;
559 	/* NULL must become visible to IRQs before we free it: */
560 	barrier();
561 
562 	kfree(ret_stack);
563 }
564 
565 /* Allocate a return stack for each task */
566 static int start_graph_tracing(void)
567 {
568 	struct ftrace_ret_stack **ret_stack_list;
569 	int ret, cpu;
570 
571 	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
572 				       sizeof(struct ftrace_ret_stack *),
573 				       GFP_KERNEL);
574 
575 	if (!ret_stack_list)
576 		return -ENOMEM;
577 
578 	/* The cpu_boot init_task->ret_stack will never be freed */
579 	for_each_online_cpu(cpu) {
580 		if (!idle_task(cpu)->ret_stack)
581 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
582 	}
583 
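	/*
	 * alloc_retstack_tasklist() hands out at most
	 * FTRACE_RETSTACK_ALLOC_SIZE stacks per pass and returns -EAGAIN
	 * while tasks without a ret_stack remain, so keep retrying until
	 * every task has one.
	 */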
584 	do {
585 		ret = alloc_retstack_tasklist(ret_stack_list);
586 	} while (ret == -EAGAIN);
587 
588 	if (!ret) {
589 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
590 		if (ret)
591 			pr_info("ftrace_graph: Couldn't activate tracepoint"
592 				" probe to kernel_sched_switch\n");
593 	}
594 
595 	kfree(ret_stack_list);
596 	return ret;
597 }
598 
599 int register_ftrace_graph(struct fgraph_ops *gops)
600 {
601 	int ret = 0;
602 
603 	mutex_lock(&ftrace_lock);
604 
605 	/* we currently allow only one tracer registered at a time */
606 	if (ftrace_graph_active) {
607 		ret = -EBUSY;
608 		goto out;
609 	}
610 
611 	register_pm_notifier(&ftrace_suspend_notifier);
612 
613 	ftrace_graph_active++;
614 	ret = start_graph_tracing();
615 	if (ret) {
616 		ftrace_graph_active--;
617 		goto out;
618 	}
619 
620 	ftrace_graph_return = gops->retfunc;
621 
622 	/*
623 	 * Update the indirect function to the entryfunc, and set the
624 	 * function that gets called to the entry_test first. Then
625 	 * call update_function_graph_func() to determine if the
626 	 * entryfunc should be called directly or not.
627 	 */
628 	__ftrace_graph_entry = gops->entryfunc;
629 	ftrace_graph_entry = ftrace_graph_entry_test;
630 	update_function_graph_func();
631 
632 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
633 out:
634 	mutex_unlock(&ftrace_lock);
635 	return ret;
636 }
637 
638 void unregister_ftrace_graph(struct fgraph_ops *gops)
639 {
640 	mutex_lock(&ftrace_lock);
641 
642 	if (unlikely(!ftrace_graph_active))
643 		goto out;
644 
645 	ftrace_graph_active--;
646 	ftrace_graph_return = ftrace_stub_graph;
647 	ftrace_graph_entry = ftrace_graph_entry_stub;
648 	__ftrace_graph_entry = ftrace_graph_entry_stub;
649 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
650 	unregister_pm_notifier(&ftrace_suspend_notifier);
651 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
652 
653  out:
654 	mutex_unlock(&ftrace_lock);
655 }
656
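/*
 * A minimal usage sketch of the registration API above; the callback and
 * ops names are illustrative only:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: trace this function
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_graph_entry,
 *		.retfunc	= my_graph_return,
 *	};
 *
 *	// from module init / exit code:
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */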