// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

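/*
 * arch_stack_walk() is the powerpc back end of the generic stacktrace
 * interface: it follows the stack frame back chain, feeding each saved
 * return address to @consume_entry until the callback declines or the
 * frame fails validation.
 */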
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task))
			return;

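		/*
		 * Per the powerpc ABI, a frame stores its caller's SP (the
		 * back chain) at offset 0 and the saved LR in the
		 * STACK_FRAME_LR_SAVE slot.
		 */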
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}
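
/*
 * Example (a sketch, not part of this file): generic code typically
 * reaches the walker above through the stack_trace_save() family,
 * roughly:
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	stack_trace_print(entries, nr, 0);
 */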

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(task)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), so a forked task will have an
		 * unreliable stack trace until it has been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_USER_INT_FRAME_SIZE;
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * cf. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_MIN_SIZE;
	}

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

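	/* The initial SP must lie within the usable part of the stack. */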
	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized. Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code; they are generic.
		 */
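		/*
		 * If the function graph tracer replaced the saved LR with
		 * its return trampoline, recover the original address.
		 */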
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)__kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}

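/*
 * Note: arch_stack_walk_reliable() backs stack_trace_save_tsk_reliable(),
 * which consumers such as livepatch use to decide whether a task's stack
 * can be trusted during a transition.
 */
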
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

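/*
 * Ask each CPU in @mask to produce a backtrace via NMI IPI. If a CPU has
 * not cleared itself from @mask within 5 seconds, fall back to dumping
 * whatever its paca says about it, including a possibly stale trace of
 * paca->saved_r1.
 */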
static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

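/*
 * Arch hook behind the trigger_*_cpu_backtrace() helpers in <linux/nmi.h>,
 * reached e.g. from the sysrq-l handler.
 */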
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */