/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}

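/*
 * The conditional_* helpers above mirror the interrupt state of the
 * interrupted context: they only (re-)enable or disable interrupts if
 * EFLAGS.IF was set in the trapped frame, so a fault taken with interrupts
 * off never turns them back on behind the caller's back.  The
 * preempt_conditional_* variants additionally bump the preempt count, since
 * the caller may be running on a shared per-CPU (IST/debug) stack where
 * being scheduled away would be unsafe.  Typical use, as in do_int3() below:
 *
 *	preempt_conditional_sti(regs);
 *	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 *	preempt_conditional_cli(regs);
 */
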
enum ctx_state ist_enter(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	if (user_mode_vm(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
		prev_state = IN_KERNEL;  /* the value is irrelevant. */
	}

	/*
	 * We are atomic because we're on the IST stack (or we're on x86_32,
	 * in which case we still shouldn't schedule).
	 *
	 * This must be after exception_enter(), because exception_enter()
	 * won't do anything if in_interrupt() returns true.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile.  Test it. */
	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");

	return prev_state;
}

void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);

	if (user_mode_vm(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode_vm(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode_vm(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
		& ~(THREAD_SIZE - 1)) != 0);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}

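/*
 * A minimal sketch of how an IST exception handler is expected to use the
 * helpers above, following the rules spelled out in the kerneldoc for
 * ist_begin_non_atomic():
 *
 *	prev_state = ist_enter(regs);
 *	... atomic work, no scheduling allowed ...
 *	if (user_mode_vm(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		... may sleep here ...
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs, prev_state);
 */
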
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}
#endif
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)

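/*
 * For reference, each DO_ERROR() line above simply stamps out a thin
 * dotraplinkage wrapper; e.g. the divide_error entry expands (roughly) to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
 *	}
 */
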
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);  /* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

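/*
 * The #BR handler below has to work out why the exception fired.  The
 * error-code field of BNDSTATUS (masked with MPX_BNDSTA_ERROR_CODE) encodes
 * this, and the switch in do_bounds() dispatches on it:
 *
 *	0	not caused by an MPX operation
 *	1	bound violation (a pointer was outside its BND bounds)
 *	2	invalid bound-directory entry (the bound table for the
 *		faulting address has not been set up yet)
 */
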
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	struct xsave_struct *xsave_buf;
	enum ctx_state prev_state;
	struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * It is not directly accessible, though, so we need to
	 * do an xsave and then pull it out of the xsave buffer.
	 */
	fpu_save_init(&tsk->thread.fpu);
	xsave_buf = &(tsk->thread.fpu.state->xsave);
	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault(xsave_buf))
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs, xsave_buf);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
#endif

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

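/*
 * Both the espfix64 fixup in do_double_fault() above and fixup_bad_iret()
 * below copy 5*8 bytes starting at the saved RIP slot: that is the hardware
 * iret frame (RIP, CS, RFLAGS, RSP, SS) the failed IRET was about to
 * consume, relocated onto the task's normal pt_regs so the rest of the exit
 * path can pretend the fault came straight from user space.
 */
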
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode_vm(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

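/*
 * For reference, the DR6 bits consulted below: DR_TRAP_BITS covers B0-B3
 * (which hardware breakpoint, if any, triggered), DR_STEP is the single-step
 * (BS) bit, and DR6_RESERVED masks the bits the CPU reports as always-set.
 * A DR6 that is all zeroes after masking, taken from user mode, is treated
 * as an icebp/int $1 trap and still gets a SIGTRAP.
 */
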
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

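/*
 * math_error() below turns the raw FPU/SSE exception flags into a signal
 * code.  For the x87 case the unmasked exceptions are "swd & ~cwd": flag
 * bits set in the status word whose mask bits are clear in the control
 * word.  For SSE there is only MXCSR, whose mask bits sit 7 bits above the
 * corresponding flag bits, hence "~(mxcsr >> 7) & mxcsr".  For example, a
 * masked-off precision exception (flag 0x20, mask 0x1000 in MXCSR) yields
 * err == 0 and no signal, while an unmasked divide-by-zero leaves 0x004 set
 * and maps to FPE_FLTDIV.
 */
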
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs))
	{
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

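/*
 * Lazy FPU switching in a nutshell: when a task that does not own the FPU
 * touches an FP/SSE instruction, the CPU raises #NM (device not available),
 * do_device_not_available() below runs with interrupts still off, and
 * math_state_restore() (re)loads that task's saved FPU state onto the CPU,
 * allocating it first if the task has never used the FPU.
 */
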
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu_counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

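	/*
	 * A note on the gate flavours used above: set_system_intr_gate() sets
	 * DPL 3 so the vector may be raised from user space (hence the "can
	 * be called from all" comments), and the *_ist() variants give the
	 * 64-bit NMI, #DF, #MC and (in early_trap_init()) #DB/#BP entries a
	 * dedicated IST stack, while 32-bit routes #DF through a separate TSS
	 * via a task gate.
	 */
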
	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}