/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Common arm64 stack unwinder code.
 *
 * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H

#include <linux/kprobes.h>
#include <linux/types.h>

struct stack_info {
	unsigned long low;
	unsigned long high;
};

/**
 * struct unwind_state - state used for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 *
 * @stack:       The stack currently being unwound.
 * @stacks:      An array of stacks which can be unwound.
 * @nr_stacks:   The number of stacks in @stacks.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;

	struct stack_info stack;
	struct stack_info *stacks;
	int nr_stacks;
};

static inline struct stack_info stackinfo_get_unknown(void)
{
	return (struct stack_info) {
		.low = 0,
		.high = 0,
	};
}

static inline bool stackinfo_on_stack(const struct stack_info *info,
				      unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;

	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;

	return true;
}
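
/*
 * Example (illustrative only, not compiled): an overflow-safe range check
 * for a 16-byte frame record against a stack whose bounds are already
 * known. All values below are hypothetical.
 */
#if 0
static bool example_record_on_stack(void)
{
	struct stack_info info = {
		.low  = 0xffff800008000000UL,
		.high = 0xffff800008004000UL,
	};
	unsigned long fp = 0xffff800008003f80UL;

	/*
	 * True: [fp, fp + 16) lies within [info.low, info.high), and the
	 * sp + size < sp test guards against address wrap-around.
	 */
	return stackinfo_on_stack(&info, fp, 16);
}
#endif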

static inline void unwind_init_common(struct unwind_state *state,
				      struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	state->stack = stackinfo_get_unknown();
}
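
/*
 * Example (illustrative only, not compiled): a minimal sketch of how an
 * unwinder might prepare its state before walking frame records. The
 * starting fp/pc values and the caller-provided stacks array are
 * hypothetical; real unwinders derive them from the task, pt_regs, or a
 * dumped stack buffer.
 */
#if 0
static void example_unwind_init(struct unwind_state *state,
				struct stack_info *stacks, int nr_stacks,
				unsigned long fp, unsigned long pc)
{
	unwind_init_common(state, current);

	/* Describe the stacks this unwind is allowed to consume. */
	state->stacks = stacks;
	state->nr_stacks = nr_stacks;

	/* Begin at the caller-provided frame record. */
	state->fp = fp;
	state->pc = pc;
}
#endif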

/**
 * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame
 * pointer to a kernel address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 *
 * Return: true if the VA can be translated, false otherwise.
 *
 * Upon success @fp is updated to the corresponding kernel virtual address.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp);
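
/*
 * Example (illustrative only, not compiled): a sketch of a translation
 * callback for an unwinder whose frame pointers come from another address
 * space, e.g. a hypervisor stack snapshotted into a kernel buffer. The
 * example_va_offset variable is hypothetical.
 */
#if 0
static unsigned long example_va_offset;

static bool example_translate_fp(unsigned long *fp)
{
	unsigned long kern_fp = *fp + example_va_offset;

	/* Reject translations which do not land on a valid kernel address. */
	if (!virt_addr_valid((void *)kern_fp))
		return false;

	*fp = kern_fp;
	return true;
}
#endif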

/*
 * Find the accessible stack (if any) which an object at [sp, sp + size)
 * lives on, searching the stacks which have not yet been consumed.
 */
static struct stack_info *unwind_find_next_stack(const struct unwind_state *state,
						 unsigned long sp,
						 unsigned long size)
{
	for (int i = 0; i < state->nr_stacks; i++) {
		struct stack_info *info = &state->stacks[i];

		if (stackinfo_on_stack(info, sp, size))
			return info;
	}

	return NULL;
}

/**
 * unwind_consume_stack() - Check if an object is on an accessible stack,
 * updating stack boundaries so that future unwind steps cannot consume this
 * object again.
 *
 * @state: the current unwind state.
 * @sp:    the base address of the object.
 * @size:  the size of the object.
 *
 * Return: 0 upon success, an error code otherwise.
 */
static inline int unwind_consume_stack(struct unwind_state *state,
				       unsigned long sp,
				       unsigned long size)
{
	struct stack_info *next;

	if (stackinfo_on_stack(&state->stack, sp, size))
		goto found;

	next = unwind_find_next_stack(state, sp, size);
	if (!next)
		return -EINVAL;

	/*
	 * Stack transitions are strictly one-way, and once we've
	 * transitioned from one stack to another, it's never valid to
	 * unwind back to the old stack.
	 *
	 * Remove the current stack from the list of stacks so that it cannot
	 * be found on a subsequent transition.
	 *
	 * Note that stacks can nest in several valid orders, e.g.
	 *
	 *   TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 *   TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *   HYP -> OVERFLOW
	 *
	 * ... so we do not check the specific order of stack
	 * transitions.
	 */
	state->stack = *next;
	*next = stackinfo_get_unknown();

found:
	/*
	 * Future unwind steps can only consume stack above this frame record.
	 * Update the current stack to start immediately above it.
	 */
	state->stack.low = sp + size;
	return 0;
}
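
/*
 * Example (illustrative only, not compiled): consuming a frame record which
 * may live on a different stack from the one currently being unwound. On
 * success the record can never be consumed again: @state->stack.low is
 * moved above it, and if a stack transition occurred the newly-entered
 * stack is removed from @state->stacks so it cannot be transitioned to a
 * second time.
 */
#if 0
static int example_consume_record(struct unwind_state *state)
{
	/* A frame record is two 8-byte entries: the saved fp and lr. */
	int err = unwind_consume_stack(state, state->fp, 16);

	/* On success, state->stack.low == state->fp + 16. */
	return err;
}
#endif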

/**
 * unwind_next_frame_record() - Unwind to the next frame record.
 *
 * @state:        the current unwind state.
 * @translate_fp: translates the fp prior to access (may be NULL)
 *
 * Return: 0 upon success, an error code otherwise.
 */
static inline int
unwind_next_frame_record(struct unwind_state *state,
			 stack_trace_translate_fp_fn translate_fp)
{
	unsigned long fp = state->fp, kern_fp = fp;
	int err;

	if (fp & 0x7)
		return -EINVAL;

	err = unwind_consume_stack(state, fp, 16);
	if (err)
		return err;

	/*
	 * If fp is not from the current address space perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp))
		return -EINVAL;

	/*
	 * Record this frame record's values.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));

	return 0;
}

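/*
 * Example (illustrative only, not compiled): a minimal unwind loop built on
 * the helpers above, loosely following the shape of the reference
 * implementation in arch/arm64/kernel/stacktrace.c. The consume_entry
 * callback, its cookie, and the initial state are assumed to be provided by
 * the caller; kretprobe/ftrace return-address fixups are omitted.
 */
#if 0
static void example_unwind(struct unwind_state *state,
			   bool (*consume_entry)(void *cookie, unsigned long pc),
			   void *cookie)
{
	while (1) {
		int ret;

		/* Report the current return address to the caller. */
		if (!consume_entry(cookie, state->pc))
			break;

		/*
		 * Step to the previous frame record. NULL means the frame
		 * records are in the current kernel address space and need
		 * no translation.
		 */
		ret = unwind_next_frame_record(state, NULL);
		if (ret < 0)
			break;
	}
}
#endif
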
#endif	/* __ASM_STACKTRACE_COMMON_H */