/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>


#define N_EXCEPTION_STACKS_END \
		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

static char x86_stack_ids[][8] = {
		[ DEBUG_STACK-1			]	= "#DB",
		[ NMI_STACK-1			]	= "NMI",
		[ DOUBLEFAULT_STACK-1		]	= "#DF",
		[ STACKFAULT_STACK-1		]	= "#SS",
		[ MCE_STACK-1			]	= "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[ N_EXCEPTION_STACKS ...
		  N_EXCEPTION_STACKS_END	]	= "#DB[?]"
#endif
};

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up a second time then
			 * there's something wrong going on - just break
			 * out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;
	struct thread_info *tinfo;
	int graph = 0;
	unsigned long dummy;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, each one is dumped in turn, following the link
	 * to the stack it interrupted:
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = ops->walk_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			pr_cont("\n");
		pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = current;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	__show_regs(regs, 1);
	printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n",
	       cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault.
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
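
/*
 * Editor's illustrative sketch (not part of the original file, kept
 * under "#if 0" so it is never compiled): a minimal consumer of
 * dump_trace() built from the two callbacks this file actually
 * invokes - ops->stack(), whose negative return stops the dump, and
 * ops->walk_stack(), which walks the frames of one stack.  The
 * ->address callback and the generic print_context_stack() walker are
 * assumed from the matching <asm/stacktrace.h>/dumpstack.c of this
 * kernel generation; the example_* names are made up for illustration.
 */
#if 0
static int example_stack(void *data, char *name)
{
	/* dump_trace() crossed onto a new stack ("IRQ", "#DF", "<EOE>", ...) */
	if (name)
		printk("%s <%s>\n", (char *)data, name);
	return 0;			/* a negative value would abort the dump */
}

static void example_address(void *data, unsigned long addr, int reliable)
{
	/* called by the frame walker for every return address it finds */
	printk("%s [<%016lx>] %s%pS\n",
	       (char *)data, addr, reliable ? "" : "? ", (void *)addr);
}

static const struct stacktrace_ops example_ops = {
	.stack		= example_stack,
	.address	= example_address,
	.walk_stack	= print_context_stack,	/* generic frame-pointer walker */
};

static void example_backtrace(struct pt_regs *regs)
{
	/*
	 * A NULL stack pointer and bp == 0 let dump_trace() derive the
	 * starting point from 'current' and 'regs', as the code above shows;
	 * 'data' is passed through to the callbacks as a log-level prefix.
	 */
	dump_trace(current, regs, NULL, 0, &example_ops, (void *)KERN_DEFAULT);
}
#endif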