xref: /openbmc/linux/arch/x86/kernel/stacktrace.c (revision 9a29f5fc)
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
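
/*
 * This file provides the x86 back ends for the generic stacktrace code in
 * kernel/stacktrace.c: arch_stack_walk() and arch_stack_walk_reliable()
 * walk kernel stacks through the unwinder, and arch_stack_walk_user()
 * follows the frame pointer chain of a user stack described by pt_regs.
 */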
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
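
/*
 * Illustrative sketch (not part of this file): a minimal consume_entry
 * callback of the kind the generic stacktrace code passes in.  The real
 * consumer lives in kernel/stacktrace.c; the names example_cookie and
 * example_save_entry below are made up for illustration only.
 */
struct example_cookie {
	unsigned long	*store;		/* caller-supplied buffer */
	unsigned int	size;		/* capacity of the buffer */
	unsigned int	len;		/* entries written so far */
};

static bool example_save_entry(void *cookie, unsigned long addr)
{
	struct example_cookie *c = cookie;

	if (c->len >= c->size)
		return false;		/* buffer full, stop the walk */

	c->store[c->len++] = addr;
	return true;			/* keep walking */
}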

int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}
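
/*
 * Illustrative sketch (not part of this file): how a caller along the lines
 * of the generic stack_trace_save_tsk_reliable() might use the reliable
 * walker, with everything except the return-value handling stripped out.
 * example_save_entry/example_cookie are the made-up helpers from the sketch
 * above.  A non-zero return means the trace is unreliable and must not be
 * consumed.
 */
static int example_save_task_reliable(struct task_struct *tsk,
				      unsigned long *store, unsigned int size)
{
	struct example_cookie c = { .store = store, .size = size };
	int ret;

	ret = arch_stack_walk_reliable(example_save_entry, &c, tsk);
	return ret ? ret : c.len;
}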

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

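/*
 * One frame of a user stack built with frame pointers: the word the frame
 * pointer points at holds the caller's saved frame pointer, and the word
 * right after it holds the return address.
 */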
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

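/*
 * Copy one stack_frame_user from the user stack.  The walk may run from
 * atomic context (e.g. perf sampling), so the copy is done with page faults
 * disabled: a fault makes __get_user() fail instead of being handled, and
 * the walk simply ends.  Returns 1 on success, 0 on failure.
 */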
static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	if (!__access_ok(fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}

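/*
 * Walk a user stack by following the frame pointer chain starting at
 * regs->bp.  This only works for user code compiled with frame pointers;
 * the walk stops at the first frame that cannot be copied, lies below the
 * current stack pointer, or carries no return address.
 */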
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr))
			break;
		fp = frame.next_fp;
	}
}
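
/*
 * Illustrative sketch (not part of this file): capturing the current task's
 * user stack, roughly what the generic stack_trace_save_user() does.
 * example_save_entry/example_cookie are the made-up helpers from the first
 * sketch; kernel threads have no user stack and are skipped.
 */
static unsigned int example_save_user(unsigned long *store, unsigned int size)
{
	struct example_cookie c = { .store = store, .size = size };

	if (current->flags & PF_KTHREAD)
		return 0;

	arch_stack_walk_user(example_save_entry, &c, task_pt_regs(current));
	return c.len;
}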