xref: /openbmc/linux/arch/x86/kernel/dumpstack_64.c (revision b34e08d5)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

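/*
 * The IST debug stack is DEBUG_STKSZ bytes and may be larger than the other
 * exception stacks (EXCEPTION_STKSZ each).  N_EXCEPTION_STACKS_END is the
 * index of the last extra "#DB[?]" slot in x86_stack_ids[] below, which
 * names the additional EXCEPTION_STKSZ-sized chunks of an oversized debug
 * stack.
 */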
#define N_EXCEPTION_STACKS_END \
		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

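/*
 * Names of the per-CPU IST exception stacks, indexed by (stack number - 1).
 * The "#DB[?]" placeholders cover the lower chunks of an oversized debug
 * stack; the '?' is patched with the chunk number at runtime in
 * in_exception_stack().
 */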
static char x86_stack_ids[][8] = {
		[ DEBUG_STACK-1			]	= "#DB",
		[ NMI_STACK-1			]	= "NMI",
		[ DOUBLEFAULT_STACK-1		]	= "#DF",
		[ STACKFAULT_STACK-1		]	= "#SS",
		[ MCE_STACK-1			]	= "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[ N_EXCEPTION_STACKS ...
		  N_EXCEPTION_STACKS_END	]	= "#DB[?]"
#endif
};

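/*
 * Check whether 'stack' lies within one of this CPU's IST exception stacks.
 * On a hit, mark that stack as visited in the *usedp bitmask (so a corrupted
 * chain cannot make us walk the same stack twice), point *idp at its name
 * and return the stack's end address; otherwise return NULL.
 */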
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

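/*
 * Check whether 'stack' points into the half-open range
 * [irq_stack, irq_stack_end), i.e. into this CPU's IRQ stack.
 */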
static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

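/*
 * Walkable size of the IRQ stack, in longs.  The per-CPU irq_stack_ptr is
 * set up to point 64 bytes below the top of the IRQ stack region, which is
 * why 64 bytes are subtracted here.
 */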
static const unsigned long irq_stack_size =
	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);

enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

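/*
 * Classify the stack that 'stack' currently points into: the task's normal
 * thread stack, an IST exception stack, the per-CPU IRQ stack, or unknown.
 * For the exception and IRQ cases, *stack_end is set to the end of that
 * stack, so the caller knows where to stop walking and where to find the
 * link to the next stack.
 */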
static enum stack_type
analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
	      unsigned long **stack_end, unsigned long *irq_stack,
	      unsigned *used, char **id)
{
	unsigned long addr;

	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
	if ((unsigned long)task_stack_page(task) == addr)
		return STACK_IS_NORMAL;

	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
					used, id);
	if (*stack_end)
		return STACK_IS_EXCEPTION;

	if (!irq_stack)
		return STACK_IS_NORMAL;

	*stack_end = irq_stack;
	irq_stack = irq_stack - irq_stack_size;

	if (in_irq_stack(stack, irq_stack, *stack_end))
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

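/*
 * dump_trace() walks all of those stacks, starting at 'stack' (or a default
 * derived from 'regs' or 'task'), and reports what it finds through the
 * caller-supplied 'ops' callbacks: ->address() for each text address,
 * ->stack() when crossing from one stack to another, and ->walk_stack() to
 * do the actual walking of a single stack.
 *
 * Usage sketch (illustrative only; 'my_stack', 'my_address' and 'my_ops'
 * are made-up names, and print_context_stack() is the generic walker
 * declared in <asm/stacktrace.h>):
 *
 *	static int my_stack(void *data, char *name) { return 0; }
 *	static int my_address(void *data, unsigned long addr, int reliable)
 *	{
 *		printk("%pB\n", (void *)addr);
 *		return 0;
 *	}
 *	static const struct stacktrace_ops my_ops = {
 *		.stack		= my_stack,
 *		.address	= my_address,
 *		.walk_stack	= print_context_stack,
 *	};
 *
 *	dump_trace(current, NULL, NULL, 0, &my_ops, NULL);
 */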
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	struct thread_info *tinfo;
	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long dummy;
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, each stack is walked in turn and then linked to
	 * the next one via the pointer stored at its end.
	 */
	tinfo = task_thread_info(task);
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(cpu, task, stack, &stack_end,
				      irq_stack, &used, &id);

		/* Assume we are done unless a case below decides otherwise */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would
			 * normally be the process stack) via the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

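/*
 * Dump the raw stack contents, up to kstack_depth_to_print words, starting
 * at 'sp'.  If the dump runs off the end of the IRQ stack, follow the link
 * (marked "<EOI>") onto the interrupted stack.  Finish with the symbolic
 * backtrace via show_trace_log_lvl().
 */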
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			pr_cont("\n");
		pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

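/*
 * Print the register state, then (for kernel-mode faults) the raw stack
 * contents and a hex dump of the code around the faulting instruction,
 * with the byte at regs->ip highlighted as <xx>.
 */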
void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;

	sp = regs->sp;
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault:
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

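/*
 * BUG() plants a two-byte ud2 instruction (0x0f 0x0b).  Read the two bytes
 * at 'ip' and check for that opcode; 0x0b0f is its little-endian image.
 */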
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}