/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}

enum ctx_state ist_enter(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	if (user_mode(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
		prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
	}

	/*
	 * We are atomic because we're on the IST stack (or we're on x86_32,
	 * in which case we still shouldn't schedule).
	 *
	 * This must be after exception_enter(), because exception_enter()
	 * won't do anything if in_interrupt() returns true.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile.  Test it. */
	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");

	return prev_state;
}

void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);

	if (user_mode(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;


	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

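/*
 * Each DO_ERROR() line below expands, via the macro above, to a complete
 * do_<name>() entry point that reports the trap through do_error_trap()
 * with the given vector number, signal and human-readable name.
 */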
DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);	/* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	struct xsave_struct *xsave_buf;
	enum ctx_state prev_state;
	struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * It is not directly accessible, though, so we need to
	 * do an xsave and then pull it out of the xsave buffer.
	 */
	fpu_save_init(&tsk->thread.fpu);
	xsave_buf = &(tsk->thread.fpu.state->xsave);
	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault(xsave_buf))
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs, xsave_buf);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode(regs))
	{
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	unlazy_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

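	/*
	 * The flag bits tested below use the common x87/SSE exception
	 * layout: IE=0x001, DE=0x002, ZE=0x004, OE=0x008, UE=0x010,
	 * PE=0x020.
	 */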
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu_counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads the TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}