/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	/* If entry registers were given, report the interrupted IP first. */
	if (regs && !consume_entry(cookie, regs->ip, false))
		return;

	/* Walk the frames, feeding each return address to the consumer. */
	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr, false))
			break;
	}
}
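
/*
 * Example (illustrative sketch, not part of the original file): the
 * generic helpers in kernel/stacktrace.c drive arch_stack_walk() with a
 * small consumer that copies entries into a caller-supplied buffer.  A
 * simplified version of that pattern looks roughly like this; the
 * struct and function names below are hypothetical, invented for the
 * example.
 */
struct example_trace {
	unsigned long	*store;		/* output buffer */
	unsigned int	size;		/* capacity of @store */
	unsigned int	len;		/* entries recorded so far */
};

static bool example_consume(void *cookie, unsigned long addr, bool reliable)
{
	struct example_trace *t = cookie;

	if (t->len >= t->size)
		return false;		/* buffer full: stop the walk */
	t->store[t->len++] = addr;
	return true;			/* keep unwinding */
}

static unsigned int example_save_trace(unsigned long *store, unsigned int size)
{
	struct example_trace t = { .store = store, .size = size };

	arch_stack_walk(example_consume, &t, current, NULL);
	return t.len;
}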

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */

			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}
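
/*
 * Example (illustrative sketch, not part of the original file):
 * arch_stack_walk_reliable() backs stack_trace_save_tsk_reliable(),
 * which live patching uses to prove that a parked task is not running
 * inside a function it is about to patch.  A hypothetical caller can
 * reuse the consumer above and treat any error as "cannot decide yet":
 */
static int example_save_task_trace_reliable(struct task_struct *task,
					    unsigned long *store,
					    unsigned int size)
{
	struct example_trace t = { .store = store, .size = size };
	int ret;

	/* The caller must keep @task inactive for the duration of the walk. */
	ret = arch_stack_walk_reliable(example_consume, &t, task);
	if (ret)
		return ret;	/* unreliable stack: caller should retry later */
	return t.len;
}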

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	/* Reject frame pointers that don't point into user address space. */
	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	/*
	 * With page faults disabled, __get_user() fails fast on a
	 * non-resident frame instead of faulting the page in, so the
	 * walk simply stops rather than sleeping.
	 */
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}
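
/*
 * Layout sketch (assuming an x86-64 binary built with frame pointers,
 * i.e. -fno-omit-frame-pointer): each frame stores the caller's frame
 * pointer and the return address as two adjacent words, which is
 * exactly what struct stack_frame_user above mirrors:
 *
 *	[fp + 8]	return address	-> frame.ret_addr
 *	[fp + 0]	caller's %rbp	-> frame.next_fp
 *
 * On 32-bit the layout is the same with 4-byte words and %ebp.
 */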

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	/* Report the user IP first, then follow the frame pointer chain. */
	if (!consume_entry(cookie, regs->ip, false))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* A valid frame pointer must lie above the user stack pointer. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr, false))
			break;
		fp = frame.next_fp;
	}
}
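
/*
 * Example (illustrative sketch, not part of the original file): the same
 * frame-pointer walk can be reproduced in a plain userspace program,
 * which is a convenient way to see the layout arch_stack_walk_user()
 * relies on.  Build with: gcc -O1 -fno-omit-frame-pointer demo.c
 */
#if 0	/* userspace-only demo, never compiled into the kernel */
#include <stdio.h>
#include <stdint.h>

struct frame {
	struct frame	*next_fp;	/* saved caller's %rbp */
	uintptr_t	ret_addr;	/* return address */
};

static void __attribute__((noinline)) walk_self(void)
{
	struct frame *fp = __builtin_frame_address(0);

	/* Follow the chain until the startup code terminates it. */
	while (fp && fp->ret_addr) {
		printf("ret=%#lx fp=%p\n", (unsigned long)fp->ret_addr, fp);
		if (fp->next_fp <= fp)	/* frames must grow upwards */
			break;
		fp = fp->next_fp;
	}
}

int main(void)
{
	walk_self();
	return 0;
}
#endif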