xref: /openbmc/linux/arch/powerpc/kernel/stacktrace.c (revision 249592bf)
// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

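/*
 * arch_stack_walk() is powerpc's backend for the generic
 * CONFIG_ARCH_STACKWALK interface: the core stacktrace code supplies
 * a consume_entry callback and we feed it one return address per
 * stack frame.
 */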
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	/* If regs were supplied, report the interrupted NIP first. */
	if (regs && !consume_entry(cookie, regs->nip))
		return;

	/*
	 * Start from the regs' SP if given, from our own frame for
	 * current, or from the task's saved kernel SP otherwise.
	 */
	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
			return;

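		/*
		 * Per the powerpc ABIs, a stack frame stores the
		 * caller's SP (the back chain) at offset 0 and the
		 * saved return address in the word indexed by
		 * STACK_FRAME_LR_SAVE.
		 */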
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}

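/*
 * Illustrative sketch (not part of this file): the generic
 * CONFIG_ARCH_STACKWALK code in kernel/stacktrace.c drives
 * arch_stack_walk() with a consume callback along these lines,
 * filling a buffer until it runs out of space.  The "example_"
 * names are hypothetical.
 */
#if 0
struct example_trace {
	unsigned long *entries;
	unsigned int max, nr;
};

static bool example_consume(void *cookie, unsigned long addr)
{
	struct example_trace *t = cookie;

	if (t->nr >= t->max)
		return false;	/* buffer full: stop the walk */
	t->entries[t->nr++] = addr;
	return true;
}

static unsigned int example_save(unsigned long *buf, unsigned int size)
{
	struct example_trace t = { .entries = buf, .max = size };

	arch_stack_walk(example_consume, &t, current, NULL);
	return t.nr;
}
#endif
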
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(task)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), and thus a forked task will have an
		 * unreliable stack trace until it has been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * cf. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

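	/* The initial SP must lie within the task's own stack. */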
	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be 16-byte aligned. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized.  Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code; they are generic.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}

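/*
 * Illustrative sketch (not part of this file): callers such as the
 * livepatch transition code reach the function above through the
 * generic stack_trace_save_tsk_reliable() wrapper, treating any
 * negative return as "unreliable, retry later".
 * example_check_task() is hypothetical.
 */
#if 0
static int example_check_task(struct task_struct *task)
{
	static unsigned long entries[64];
	int nr;

	nr = stack_trace_save_tsk_reliable(task, entries,
					   ARRAY_SIZE(entries));
	return nr < 0 ? nr : 0;	/* negative means the trace is unreliable */
}
#endif
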
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	/*
	 * Ask each CPU in the mask to print its own backtrace; remote
	 * CPUs get a safe NMI IPI with a 5 second timeout.
	 */
	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

	/*
	 * Any CPU still set in the mask did not respond within the
	 * timeout; dump whatever its paca can tell us instead.
	 */
	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
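
/*
 * Illustrative sketch (not part of this file): generic users such as
 * the hung-task detector reach the hook above through helpers like
 * trigger_all_cpu_backtrace().  example_dump_all_cpus() is
 * hypothetical.
 */
#if 0
static void example_dump_all_cpus(void)
{
	if (!trigger_all_cpu_backtrace())
		pr_warn("NMI backtrace unsupported on this platform\n");
}
#endif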
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */