xref: /openbmc/linux/arch/arm64/include/asm/stacktrace/common.h (revision 36f9a8793c16da01dffe0718b66c884933b06b98)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Common arm64 stack unwinder code.
4  *
5  * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
6  *
7  * Copyright (C) 2012 ARM Ltd.
8  */
9 #ifndef __ASM_STACKTRACE_COMMON_H
10 #define __ASM_STACKTRACE_COMMON_H
11 
12 #include <linux/bitmap.h>
13 #include <linux/bitops.h>
14 #include <linux/kprobes.h>
15 #include <linux/types.h>
16 
/*
 * The classes of stack an unwinder may encounter. Values index the
 * unwind_state::stacks_done bitmap, so __NR_STACK_TYPES must stay last.
 */
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	STACK_TYPE_HYP,
	__NR_STACK_TYPES	/* number of stack types; keep last */
};
27 
/*
 * struct stack_info - Bounds and classification of a single stack.
 *
 * @low:  lowest address of the stack (inclusive). A value of 0 marks the
 *        stack as absent/invalid (see stackinfo_on_stack()).
 * @high: highest address of the stack (exclusive one-past-end bound).
 * @type: which class of stack this describes.
 */
struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};
33 
/**
 * struct unwind_state - state used for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Bitmap (indexed by enum stack_type) of stacks which have
 *               been entirely unwound, and which it is no longer valid to
 *               unwind onto.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic
 *               value of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;
};
67 
68 static inline bool stackinfo_on_stack(const struct stack_info *info,
69 				      unsigned long sp, unsigned long size)
70 {
71 	if (!info->low)
72 		return false;
73 
74 	if (sp < info->low || sp + size < sp || sp + size > info->high)
75 		return false;
76 
77 	return true;
78 }
79 
80 static inline bool on_stack(unsigned long sp, unsigned long size,
81 			    unsigned long low, unsigned long high,
82 			    enum stack_type type, struct stack_info *info)
83 {
84 	struct stack_info tmp = {
85 		.low = low,
86 		.high = high,
87 		.type = type,
88 	};
89 
90 	if (!stackinfo_on_stack(&tmp, sp, size))
91 		return false;
92 
93 	if (info)
94 		*info = tmp;
95 
96 	return true;
97 }
98 
99 static inline void unwind_init_common(struct unwind_state *state,
100 				      struct task_struct *task)
101 {
102 	state->task = task;
103 #ifdef CONFIG_KRETPROBES
104 	state->kr_cur = NULL;
105 #endif
106 
107 	/*
108 	 * Prime the first unwind.
109 	 *
110 	 * In unwind_next() we'll check that the FP points to a valid stack,
111 	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
112 	 * treated as a transition to whichever stack that happens to be. The
113 	 * prev_fp value won't be used, but we set it to 0 such that it is
114 	 * definitely not an accessible stack address.
115 	 */
116 	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
117 	state->prev_fp = 0;
118 	state->prev_type = STACK_TYPE_UNKNOWN;
119 }
120 
/**
 * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame
 * pointer to a kernel address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 * @type: the stack type associated with frame pointer @fp.
 *
 * Return: true if the VA can be translated, false otherwise.
 *
 * Upon success @fp is updated in place to the corresponding kernel virtual
 * address. Callers may pass a NULL function pointer when no translation is
 * required (see unwind_next_frame_record()).
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
					    enum stack_type type);
134 
/**
 * typedef on_accessible_stack_fn() - Check whether a stack range is on any of
 * the possible stacks.
 *
 * @tsk:  task whose stack is being unwound.
 * @sp:   base stack address of the range being checked.
 * @size: size of the stack range being checked, in bytes.
 * @info: stack unwinding context, filled in by the callback.
 *
 * Return: true if the stack range is accessible, false otherwise.
 *
 * Upon success @info is updated with information for the relevant stack.
 *
 * Upon failure @info is updated with the UNKNOWN stack.
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);
153 
/**
 * unwind_next_frame_record() - Unwind to the next frame record.
 *
 * @state:        the current unwind state.
 * @accessible:   determines whether the frame record is accessible.
 * @translate_fp: translates the fp prior to access (may be NULL).
 *
 * Reads the next (fp, lr) pair out of the current frame record and advances
 * @state to it, enforcing that unwinding only moves up each stack and never
 * returns to a stack which has already been left.
 *
 * Return: 0 upon success, an error code otherwise.
 */
static inline int
unwind_next_frame_record(struct unwind_state *state,
			 on_accessible_stack_fn accessible,
			 stack_trace_translate_fp_fn translate_fp)
{
	struct stack_info info;
	unsigned long fp = state->fp, kern_fp = fp;
	struct task_struct *tsk = state->task;

	/* Frame records must be 8-byte aligned. */
	if (fp & 0x7)
		return -EINVAL;

	/* The record is 16 bytes: the saved fp followed by the saved lr. */
	if (!accessible(tsk, fp, 16, &info))
		return -EINVAL;

	/* Refuse to unwind onto a stack we have already fully unwound. */
	if (test_bit(info.type, state->stacks_done))
		return -EINVAL;

	/*
	 * If fp is not from the current address space perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp, info.type))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 * HYP -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == state->prev_type) {
		/* Same stack: require strictly increasing record addresses. */
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		/* Stack transition: retire the stack we are leaving. */
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_fp = fp;
	state->prev_type = info.type;

	return 0;
}
220 
221 #endif	/* __ASM_STACKTRACE_COMMON_H */
222