xref: /openbmc/linux/arch/arm/kernel/stacktrace.c (revision fca3aa16)
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>

#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument.  Unwinding is equivalent to a function
 * return, hence the new PC value rather than LR should be used for the
 * backtrace.
 *
 * With frame pointers enabled, a simple function prologue looks like this:
 *	mov	ip, sp
 *	stmdb	sp!, {fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * A simple function epilogue looks like this:
 *	ldm	sp, {fp, sp, pc}
 *
 * Note that with frame pointers enabled, even leaf functions have the same
 * prologue and epilogue, so we can ignore the LR value in this case.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

	/* check current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

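	/*
	 * The prologue shown above stores {fp, ip, lr, pc} and then sets
	 * fp = ip - 4, so the caller's fp, the old sp (the saved ip) and
	 * the return address (the saved lr) sit at fp - 12, fp - 8 and
	 * fp - 4 respectively.
	 */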
	/* restore the registers from the stack frame */
	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);

	return 0;
}
#endif

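/*
 * Walk the stack, calling fn() on each frame.  The walk stops when fn()
 * returns non-zero or when the frame can no longer be unwound.
 */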
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
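/*
 * Context passed to save_trace() by walk_stackframe():
 * @trace:		the stack_trace buffer being filled
 * @no_sched_functions:	skip entries that fall within the scheduler
 * @skip:		number of initial entries to discard
 */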
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	struct pt_regs *regs;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	if (trace->nr_entries >= trace->max_entries)
		return 1;

	if (!in_entry_text(frame->pc))
		return 0;

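	/*
	 * We have unwound into the exception entry code: frame->sp points
	 * at the pt_regs saved on entry, so also record the interrupted
	 * pc and let the trace continue across the exception.
	 */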
	regs = (struct pt_regs *)frame->sp;

	trace->entries[trace->nr_entries++] = regs->ARM_pc;

	return trace->nr_entries >= trace->max_entries;
}

/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* We don't want this function or its caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)__save_stack_trace;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

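/*
 * Save a stack trace starting from the register state in @regs, e.g. as
 * captured at an exception.  As above, the trace is terminated with
 * ULONG_MAX if there is room left in the buffer.
 */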
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	frame.fp = regs->ARM_fp;
	frame.sp = regs->ARM_sp;
	frame.lr = regs->ARM_lr;
	frame.pc = regs->ARM_pc;

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

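/*
 * Save a stack trace of @tsk, filtering out scheduler-internal entries
 * so the trace reflects where the task itself is waiting.
 */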
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif