// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};
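
/*
 * Purely illustrative, not used by this file: the AAPCS64 frame record
 * layout that unwind_next() below assumes when it dereferences fp. A
 * frame record is a pair of 64-bit words (the saved fp, linking to the
 * caller's frame record, followed by the saved lr) which unwind_next()
 * reads at fp and fp + 8 after checking that fp is 8-byte aligned. The
 * struct name is hypothetical.
 */
struct frame_record_sketch {
	unsigned long fp;	/* [fp]:     link to the previous frame record */
	unsigned long lr;	/* [fp + 8]: return address into the caller */
};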

static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
				unsigned long pc)
{
	state->fp = fp;
	state->pc = pc;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}
NOKPROBE_SYMBOL(unwind_init);

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct task_struct *tsk,
			       struct unwind_state *state)
{
	unsigned long fp = state->fp;
	struct stack_info info;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	if (fp & 0x7)
		return -EINVAL;

	if (!on_accessible_stack(tsk, fp, 16, &info))
		return -EINVAL;

	if (test_bit(info.type, state->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	state->prev_fp = fp;
	state->prev_type = info.type;

	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);

static void notrace unwind(struct task_struct *tsk,
			   struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(tsk, state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct unwind_state state;

	if (regs)
		unwind_init(&state, regs->regs[29], regs->pc);
	else if (task == current)
		unwind_init(&state,
				(unsigned long)__builtin_frame_address(1),
				(unsigned long)__builtin_return_address(0));
	else
		unwind_init(&state, thread_saved_fp(task),
				thread_saved_pc(task));

	unwind(task, &state, consume_entry, cookie);
}
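
/*
 * A minimal usage sketch, not part of this file: collecting return
 * addresses with arch_stack_walk(). The consume callback is invoked with
 * one pc per unwound frame and returns false to stop the walk early, as
 * in the unwind() loop above. All example_* names are hypothetical.
 */
struct example_trace {
	unsigned long entries[16];
	unsigned int nr;
};

static bool example_consume_entry(void *cookie, unsigned long pc)
{
	struct example_trace *trace = cookie;

	trace->entries[trace->nr++] = pc;

	/* Stop once the buffer is full; returning false ends the walk. */
	return trace->nr < ARRAY_SIZE(trace->entries);
}

static void example_walk_current(void)
{
	struct example_trace trace = { .nr = 0 };

	/* regs == NULL, task == current: start from our own caller. */
	arch_stack_walk(example_consume_entry, &trace, current, NULL);
}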