/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

static struct pt_regs exec_summary_regs;

bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
			   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

/* Called from get_stack_info_noinstr - so must be noinstr too */
bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

static void printk_stack_address(unsigned long address, int reliable,
				 const char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
		     unsigned int nbytes)
{
	if (!user_mode(regs))
		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

	/*
	 * Make sure userspace isn't trying to trick us into dumping kernel
	 * memory by pointing the userspace instruction pointer at it.
	 */
	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
		return -EINVAL;

	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}

/*
 * There are a couple of reasons for the 2/3rds prologue, courtesy of Linus:
 *
 * In the case where we don't have the exact kernel image (which, if we did,
 * we could simply disassemble and navigate to the RIP), the purpose of the
 * bigger prologue is to have more context and to be able to correlate the
 * code from the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus makes sense of the register dump.
 *
 * What is more, the additional complication of a variable length insn arch
 * like x86 warrants having a longer byte sequence before rIP so that the
 * disassembler can "sync" up properly and find instruction boundaries when
 * decoding the opcode bytes.
 *
 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
 * guesstimate in an attempt to achieve all of the above.
 */
void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];
	unsigned long prologue = regs->ip - PROLOGUE_SIZE;

	if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
		       loglvl, prologue);
	} else {
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
	}
}
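
/*
 * For illustration only (the byte values below are made up), the resulting
 * oops line looks roughly like:
 *
 *	Code: 8b 05 ... 41 54 55 48 89 e5 <0f> 0b 5d 41 5c ...
 *
 * i.e. PROLOGUE_SIZE bytes, then the byte at regs->ip in <brackets>, then
 * EPILOGUE_SIZE bytes, so a disassembler has enough leading context to
 * resync on instruction boundaries before reaching the faulting byte.
 */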

void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}

void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
	show_ip(regs, log_lvl);
	printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
	       regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial, const char *log_lvl)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs
	 * so they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, SHOW_REGS_SHORT, log_lvl);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs, log_lvl);
	}
}
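
/*
 * A minimal sketch of what show_trace_log_lvl() below prints (symbols,
 * offsets and the stack name are made up for illustration):
 *
 *	Call Trace:
 *	 <IRQ>
 *	 some_irq_handler+0x23/0x70
 *	 ? stale_text_address+0x8/0x10
 *	 </IRQ>
 *	 do_some_syscall+0x41/0x90
 *
 * Addresses the unwinder did not report are prefixed with '?', and named
 * stacks are bracketed with <name>/</name> markers.
 */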

void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, const char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);

		/*
		 * Scan the stack, printing any text addresses we find.  At
		 * the same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which
		 * are sometimes useful for debugging and are prefixed with
		 * '?'.  This also serves as a failsafe option in case the
		 * unwinder goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

void show_stack(struct task_struct *task, unsigned long *sp,
		const char *loglvl)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, loglvl);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
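
/*
 * A minimal sketch of how the oops_begin()/oops_end() pair below is meant
 * to be used; the real callers are die() and die_addr() further down:
 *
 *	unsigned long flags = oops_begin();
 *	...report the failure (e.g. via __die(str, regs, err))...
 *	oops_end(flags, regs, SIGSEGV);
 */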

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 *
	 * Before we rewind the stack, we have to tell KASAN that we're going
	 * to reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static void __die_header(const char *str, struct pt_regs *regs, long err)
{
	const char *pr = "";

	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       pr,
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);

static int __die_body(const char *str, struct pt_regs *regs, long err)
{
	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
	__die_header(str, regs, err);
	return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	__die_header(str, regs, err);
	if (gp_addr)
		kasan_non_canonical_hook(gp_addr);
	if (__die_body(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}
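
/*
 * For illustration, a (hypothetical) trap handler reporting a fatal
 * kernel-mode fault through this interface would look roughly like:
 *
 *	if (!user_mode(regs))
 *		die("general protection fault", regs, error_code);
 *
 * which prints the die header, registers and backtrace, then either
 * returns (if a die notifier handled it) or kills the task via oops_end().
 */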

void show_regs(struct pt_regs *regs)
{
	enum show_regs_mode print_kernel_regs;

	show_regs_print_info(KERN_DEFAULT);

	print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
	__show_regs(regs, print_kernel_regs, KERN_DEFAULT);

	/*
	 * When in kernel mode, we also print out the stack at the time of
	 * the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
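
/*
 * Usage note (hypothetical call site, not part of this file): dumping the
 * current task's stack for ad-hoc debugging goes through show_stack(),
 * which falls back to the current task and its current stack pointer:
 *
 *	show_stack(NULL, NULL, KERN_INFO);
 */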