xref: /openbmc/linux/arch/x86/kernel/stacktrace.c (revision b58c6630)
1 /*
2  * Stack trace management functions
3  *
4  *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  */
6 #include <linux/sched.h>
7 #include <linux/sched/debug.h>
8 #include <linux/sched/task_stack.h>
9 #include <linux/stacktrace.h>
10 #include <linux/export.h>
11 #include <linux/uaccess.h>
12 #include <asm/stacktrace.h>
13 #include <asm/unwind.h>
14 
15 void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
16 		     struct task_struct *task, struct pt_regs *regs)
17 {
18 	struct unwind_state state;
19 	unsigned long addr;
20 
21 	if (regs && !consume_entry(cookie, regs->ip, false))
22 		return;
23 
24 	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
25 	     unwind_next_frame(&state)) {
26 		addr = unwind_get_return_address(&state);
27 		if (!addr || !consume_entry(cookie, addr, false))
28 			break;
29 	}
30 }
31 
32 /*
33  * This function returns an error if it detects any unreliable features of the
34  * stack.  Otherwise it guarantees that the stack trace is reliable.
35  *
36  * If the task is not 'current', the caller *must* ensure the task is inactive.
37  */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	/*
	 * Walk every frame, leaving the loop as soon as the unwinder flags
	 * an error so the corruption check below can report it.
	 */
	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */

			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		/* Consumer refused the entry (e.g. its buffer is full). */
		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/*
	 * A user task's walk ends at its user-mode pt_regs, which the
	 * user_mode() branch above already turned into a success return.
	 * Reaching here with a user task therefore means the walk ended
	 * short of those registers — not reliable.
	 */
	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}
90 
91 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
92 
/*
 * Shape of one frame record read from the user stack: the saved caller
 * frame pointer followed by the return address (matches the layout that
 * copy_stack_frame() reads field by field below).
 */
struct stack_frame_user {
	const void __user	*next_fp;	/* caller's saved frame pointer */
	unsigned long		ret_addr;	/* return address into caller */
};
97 
98 static int
99 copy_stack_frame(const struct stack_frame_user __user *fp,
100 		 struct stack_frame_user *frame)
101 {
102 	int ret;
103 
104 	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
105 		return 0;
106 
107 	ret = 1;
108 	pagefault_disable();
109 	if (__get_user(frame->next_fp, &fp->next_fp) ||
110 	    __get_user(frame->ret_addr, &fp->ret_addr))
111 		ret = 0;
112 	pagefault_enable();
113 
114 	return ret;
115 }
116 
117 void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
118 			  const struct pt_regs *regs)
119 {
120 	const void __user *fp = (const void __user *)regs->bp;
121 
122 	if (!consume_entry(cookie, regs->ip, false))
123 		return;
124 
125 	while (1) {
126 		struct stack_frame_user frame;
127 
128 		frame.next_fp = NULL;
129 		frame.ret_addr = 0;
130 		if (!copy_stack_frame(fp, &frame))
131 			break;
132 		if ((unsigned long)fp < regs->sp)
133 			break;
134 		if (!frame.ret_addr)
135 			break;
136 		if (!consume_entry(cookie, frame.ret_addr, false))
137 			break;
138 		fp = frame.next_fp;
139 	}
140 }
141 
142