/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
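/*
 * Illustrative sketch (not built): do_trap_no_signal() above relies on
 * fixup_exception(), which is driven by exception-table entries pairing a
 * potentially faulting instruction with a recovery point.  A hypothetical
 * uaccess-style helper (SMAP/STAC handling omitted for brevity):
 */
#if 0
static int example_read_user_long(unsigned long *dst,
				  const unsigned long __user *src)
{
	int ret = -EFAULT;

	asm volatile("1:	mov %2, %1\n"
		     "	mov $0, %0\n"		/* only reached on success */
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)	/* on a fault at 1: resume at 2: */
		     : "+r" (ret), "=r" (*dst)
		     : "m" (*src));

	return ret;
}
#endif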
static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr, int sicode,
			  void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires the address of the faulting instruction to be provided for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}
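/*
 * Illustrative sketch (not built, userspace): how the si_addr filled in
 * above is consumed.  A hypothetical handler for the SIGFPE sent by
 * exc_divide_error() sees the (XOL-corrected) faulting instruction address:
 */
#if 0
static void sigfpe_handler(int sig, siginfo_t *info, void *uctx)
{
	/*
	 * sig == SIGFPE, info->si_code == FPE_INTDIV, and
	 * info->si_addr points at the faulting divide instruction.
	 */
}

static void install_sigfpe_handler(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigfpe_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGFPE, &sa, NULL);
}
#endif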
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}
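/*
 * Illustrative sketch (not built): why the regs->ip += LEN_UD2 in
 * handle_bug() matters.  WARN_ON() compiles down to a UD2 plus a
 * __bug_table entry; because the #UD handler advances past the UD2 for
 * the WARN case, execution continues after the splat is printed:
 */
#if 0
	/* fragment; some_condition and handle_it() are hypothetical */
	if (WARN_ON(some_condition)) {
		/*
		 * If some_condition was true, the UD2 trapped into
		 * exc_invalid_op(), report_bug() printed the warning,
		 * regs->ip was advanced past the UD2, and execution
		 * resumed here with the condition's value preserved.
		 */
		handle_it();
	}
#endif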
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		return;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
	       (void *)fault_address, current->stack,
	       (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s", message);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument.  On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	idtentry_enter_nmi(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
				      regs, address);
	}
#endif
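	/*
	 * Worked example for the guard-page test above (added for clarity):
	 * with task_stack_page(tsk) == S, the unsigned comparison
	 *
	 *	S - 1 - address < PAGE_SIZE
	 *
	 * accepts exactly address in [S - PAGE_SIZE, S - 1], i.e. CR2
	 * anywhere in the guard page immediately below the stack; any
	 * address at or above S wraps around and fails the test.
	 */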
	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};
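/*
 * For illustration (added note): with these hints, the die() message
 * assembled in exc_general_protection() below takes one of three forms:
 *
 *	general protection fault
 *	general protection fault, maybe for address 0x...
 *	general protection fault, probably for non-canonical address 0x...
 *
 * where the address comes from decoding the faulting instruction.
 */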
/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller.  Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
				     MAX_INSN_SIZE))
		return GP_NO_HINT;

	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
	insn_get_modrm(&insn);
	insn_get_sib(&insn);

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}

#define GPFSTR "general protection fault"
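/*
 * Illustrative sketch (not built): the same in-kernel decoder used by
 * get_kernel_gp_address() can be pointed at any kernel text address;
 * insn_get_length() runs the full prefix/opcode/operand parse:
 */
#if 0
/* some_ip is a hypothetical kernel text address */
static void example_decode_insn(unsigned long some_ip)
{
	u8 buf[MAX_INSN_SIZE];
	struct insn insn;

	if (copy_from_kernel_nofault(buf, (void *)some_ip, MAX_INSN_SIZE))
		return;

	kernel_insn_init(&insn, buf, MAX_INSN_SIZE);
	insn_get_length(&insn);
	pr_info("insn at %lx: %d bytes\n", some_ip, insn.length);
}
#endif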
DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before.  If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		bool irq_state = idtentry_enter_nmi(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		idtentry_exit_nmi(regs, irq_state);
	}
}
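/*
 * Added note: INT3 is a single byte (0xCC), which is what makes the live
 * text-patching machinery behind poke_int3_handler() workable - one byte
 * can be swapped into kernel text atomically, and any CPU that executes
 * the half-patched site traps here and is steered by poke_int3_handler()
 * until the full replacement instruction is in place.
 */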
#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode.  The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;

	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack
	 */
	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
	    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted.  Now check whether entry
	 * happened from a safe stack.  Not safe are the entry or unknown stacks,
	 * use the fall-back stack instead in this case.
	 */
	sp    = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type >= STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * IST stack.  The code below only copies pt_regs, the real switch happens
	 * in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel.  Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses.  Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space.  Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}

static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	bool irq_state = idtentry_enter_nmi(regs);

	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP.  If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	if (kprobe_debug_handler(regs))
		goto out;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	idtentry_exit_nmi(regs, irq_state);

	local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6.  ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If DR6 gives us no reason for this trap, then it's very likely
	 * the result of an icebp/int01 trap.  User wants a SIGTRAP for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif
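/*
 * Illustrative sketch (not built, userspace): the icebp case in
 * exc_debug_user() can be exercised directly; the one-byte 0xF1 opcode
 * raises #DB with an empty DR6, which is exactly the icebp = !dr6
 * condition above and results in a SIGTRAP to the calling task:
 */
#if 0
	asm volatile(".byte 0xf1");	/* icebp/int01 */
#endif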
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler.  However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is
	 * not hurting and who knows which other CPUs suffer from this.
	 */
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	idt_setup_traps();

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();
}