xref: /openbmc/linux/arch/x86/kernel/dumpstack.c (revision a99237af)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

static struct pt_regs exec_summary_regs;

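/*
 * Check whether @stack points into @task's stack page.  If so, describe
 * the task stack in @info so the unwinder can walk it.
 */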
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

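/*
 * Check whether @stack points into this CPU's entry trampoline stack and,
 * if so, fill out @info accordingly.
 */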
bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

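/*
 * Print a single backtrace entry.  Touching the NMI watchdog keeps a long
 * stack dump from being mistaken for a hard lockup, and %pB prints the
 * symbol+offset adjusted for return addresses.
 */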
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

/*
 * There are a couple of reasons for the 2/3rds prologue, courtesy of Linus:
 *
 * In the case where we don't have the exact kernel image (which, if we did,
 * we could simply disassemble and navigate to the RIP), the purpose of the
 * bigger prologue is to have more context and to be able to correlate the
 * code from the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus making sense of the register dump.
 *
 * What is more, the additional complication of a variable length insn arch
 * like x86 warrants having a longer byte sequence before rIP so that the
 * disassembler can "sync" up properly and find instruction boundaries when
 * decoding the opcode bytes.
 *
 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE are just a random
 * guesstimate in an attempt to achieve all of the above.
 */
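/*
 * The resulting oops line then looks like:
 *
 *   Code: 48 89 e5 ... <0f> 0b ...
 *
 * with the byte at the faulting RIP printed in angle brackets between the
 * prologue and epilogue bytes.
 */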
void show_opcodes(u8 *rip, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];

	if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
		printk("%sCode: Bad RIP value.\n", loglvl);
	} else {
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
	}
}

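/* Print the instruction pointer (EIP/RIP) and the surrounding opcode bytes. */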
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes((u8 *)regs->ip, loglvl);
}

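/*
 * Print only the registers the CPU pushes in the hardware iret frame:
 * the instruction pointer plus SS:SP and EFLAGS.
 */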
void show_iret_regs(struct pt_regs *regs)
{
	show_ip(regs, KERN_DEFAULT);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}

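/*
 * The main backtrace routine: walk every stack the task has been using,
 * printing both the addresses reported by the unwinder and any other text
 * addresses found by scanning the raw stack contents.
 */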
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

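/*
 * The die lock serializes oops output from multiple CPUs while still
 * allowing the owning CPU to re-enter for nested oopses.
 */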
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

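/*
 * Begin an oops: disable interrupts, take the die lock (recursively on the
 * same CPU) and make the console as verbose as possible.  Returns the saved
 * IRQ flags, which must be handed back to oops_end().
 */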
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

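/*
 * Implemented in entry assembly: switch back to the top of the task stack
 * before calling do_exit(), since the oops may have happened on an IST or
 * nearly-overflowed stack.
 */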
void __noreturn rewind_stack_do_exit(int signr);

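/*
 * Finish an oops: hand control to kexec if a crash kernel is loaded, drop
 * the die lock, and either return (signr == 0), panic, or kill the task.
 */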
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, true);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

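/*
 * Print the oops header, registers and module list.  Returns 1 if a die
 * notifier handled the event (NOTIFY_STOP) and the caller should not
 * deliver a signal.
 */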
int __die(const char *str, struct pt_regs *regs, long err)
{
	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");

	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die);

381  * This is gone through when something in the kernel has done something bad
382  * and is about to be terminated:
383  */
384 void die(const char *str, struct pt_regs *regs, long err)
385 {
386 	unsigned long flags = oops_begin();
387 	int sig = SIGSEGV;
388 
389 	if (__die(str, regs, err))
390 		sig = 0;
391 	oops_end(flags, regs, sig);
392 }
393 
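/*
 * Print the full register state and, for a kernel-mode fault, the stack
 * trace as well.  On 32-bit, user-mode registers get the shorter dump.
 */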
void show_regs(struct pt_regs *regs)
{
	bool all = true;

	show_regs_print_info(KERN_DEFAULT);

	if (IS_ENABLED(CONFIG_X86_32))
		all = !user_mode(regs);

	__show_regs(regs, all);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}