/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

static int __kprobes
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
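		 * (X86_TRAP_UD is vector 6, so the trapnr < X86_TRAP_UD test
		 * below covers exactly vectors 0-5; the NMI vector, 2, is
		 * handled elsewhere and never reaches this function.)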
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
		return -1;
	}
#endif
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	enum ctx_state prev_state;					\
									\
	prev_state = exception_enter();					\
	if (notify_die(DIE_TRAP, str, regs, error_code,			\
		       trapnr, signr) == NOTIFY_STOP) {			\
		exception_exit(prev_state);				\
		return;							\
	}								\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
	exception_exit(prev_state);					\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	enum ctx_state prev_state;					\
									\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	prev_state = exception_enter();					\
	if (notify_die(DIE_TRAP, str, regs, error_code,			\
		       trapnr, signr) == NOTIFY_STOP) {			\
		exception_exit(prev_state);				\
		return;							\
	}								\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
	exception_exit(prev_state);					\
}

DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
	      regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
	      regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
	 coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
	      BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
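/*
 * Note: since this handler runs on an IST stack, it uses the
 * preempt_conditional_sti()/preempt_conditional_cli() pair rather than the
 * plain conditional variants, so preemption stays disabled for as long as
 * interrupts may be re-enabled here.
 */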
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
		preempt_conditional_sti(regs);
		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
		preempt_conditional_cli(regs);
	}
	exception_exit(prev_state);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	exception_enter();
	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
#endif

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV, tsk);
exit:
	exception_exit(prev_state);
}

/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
		       SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
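	 * (debug_stack_usage_inc()/debug_stack_usage_dec() bracket this
	 * region so the NMI path can tell that the IST debug stack is
	 * currently occupied.)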
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	exception_exit(prev_state);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = exception_enter();

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 tells us nothing about the origin of this trap, then it's
	 * very likely the result of an icebp/int01 trap. User wants a
	 * sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
		       SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				 X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	exception_exit(prev_state);
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
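		 * (In MXCSR the mask bits occupy bits 7-12, directly above the
		 * exception flag bits 0-5, hence the shift by 7 below.)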
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);

	/*
	 * Paranoid restore. Send a SIGSEGV if we fail to restore the state.
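	 * (restore_fpu_checking() returns non-zero when the saved register
	 * image cannot be loaded back into the FPU.)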
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
		       X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, &page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, &page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, &divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, &bounds);
	set_intr_gate(X86_TRAP_UD, &invalid_op);
	set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
	set_intr_gate(X86_TRAP_NP, &segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, &general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
	set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR,
		used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}