1 /* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 4 * 5 * Pentium III FXSR, SSE support 6 * Gareth Hughes <gareth@valinux.com>, May 2000 7 */ 8 9 /* 10 * Handle hardware traps and faults. 11 */ 12 #include <linux/interrupt.h> 13 #include <linux/kallsyms.h> 14 #include <linux/spinlock.h> 15 #include <linux/kprobes.h> 16 #include <linux/uaccess.h> 17 #include <linux/kdebug.h> 18 #include <linux/kgdb.h> 19 #include <linux/kernel.h> 20 #include <linux/module.h> 21 #include <linux/ptrace.h> 22 #include <linux/string.h> 23 #include <linux/delay.h> 24 #include <linux/errno.h> 25 #include <linux/kexec.h> 26 #include <linux/sched.h> 27 #include <linux/timer.h> 28 #include <linux/init.h> 29 #include <linux/bug.h> 30 #include <linux/nmi.h> 31 #include <linux/mm.h> 32 #include <linux/smp.h> 33 #include <linux/io.h> 34 35 #ifdef CONFIG_EISA 36 #include <linux/ioport.h> 37 #include <linux/eisa.h> 38 #endif 39 40 #if defined(CONFIG_EDAC) 41 #include <linux/edac.h> 42 #endif 43 44 #include <asm/kmemcheck.h> 45 #include <asm/stacktrace.h> 46 #include <asm/processor.h> 47 #include <asm/debugreg.h> 48 #include <linux/atomic.h> 49 #include <asm/ftrace.h> 50 #include <asm/traps.h> 51 #include <asm/desc.h> 52 #include <asm/i387.h> 53 #include <asm/fpu-internal.h> 54 #include <asm/mce.h> 55 56 #include <asm/mach_traps.h> 57 58 #ifdef CONFIG_X86_64 59 #include <asm/x86_init.h> 60 #include <asm/pgalloc.h> 61 #include <asm/proto.h> 62 #else 63 #include <asm/processor-flags.h> 64 #include <asm/setup.h> 65 66 asmlinkage int system_call(void); 67 68 /* Do we ignore FPU interrupts ? */ 69 char ignore_fpu_irq; 70 71 /* 72 * The IDT has to be page-aligned to simplify the Pentium 73 * F0 0F bug workaround. 74 */ 75 gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; 76 #endif 77 78 DECLARE_BITMAP(used_vectors, NR_VECTORS); 79 EXPORT_SYMBOL_GPL(used_vectors); 80 81 static inline void conditional_sti(struct pt_regs *regs) 82 { 83 if (regs->flags & X86_EFLAGS_IF) 84 local_irq_enable(); 85 } 86 87 static inline void preempt_conditional_sti(struct pt_regs *regs) 88 { 89 inc_preempt_count(); 90 if (regs->flags & X86_EFLAGS_IF) 91 local_irq_enable(); 92 } 93 94 static inline void conditional_cli(struct pt_regs *regs) 95 { 96 if (regs->flags & X86_EFLAGS_IF) 97 local_irq_disable(); 98 } 99 100 static inline void preempt_conditional_cli(struct pt_regs *regs) 101 { 102 if (regs->flags & X86_EFLAGS_IF) 103 local_irq_disable(); 104 dec_preempt_count(); 105 } 106 107 static void __kprobes 108 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, 109 long error_code, siginfo_t *info) 110 { 111 struct task_struct *tsk = current; 112 113 #ifdef CONFIG_X86_32 114 if (regs->flags & X86_VM_MASK) { 115 /* 116 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 117 * On nmi (interrupt 2), do_trap should not be called. 118 */ 119 if (trapnr < X86_TRAP_UD) 120 goto vm86_trap; 121 goto trap_signal; 122 } 123 #endif 124 125 if (!user_mode(regs)) 126 goto kernel_trap; 127 128 #ifdef CONFIG_X86_32 129 trap_signal: 130 #endif 131 /* 132 * We want error_code and trap_nr set for userspace faults and 133 * kernelspace faults which result in die(), but not 134 * kernelspace faults which are fixed up. die() gives the 135 * process no chance to handle the signal and notice the 136 * kernel fault information, so that won't result in polluting 137 * the information about previously queued, but not yet 138 * delivered, faults. See also do_general_protection below. 139 */ 140 tsk->thread.error_code = error_code; 141 tsk->thread.trap_nr = trapnr; 142 143 #ifdef CONFIG_X86_64 144 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 145 printk_ratelimit()) { 146 printk(KERN_INFO 147 "%s[%d] trap %s ip:%lx sp:%lx error:%lx", 148 tsk->comm, tsk->pid, str, 149 regs->ip, regs->sp, error_code); 150 print_vma_addr(" in ", regs->ip); 151 printk("\n"); 152 } 153 #endif 154 155 if (info) 156 force_sig_info(signr, info, tsk); 157 else 158 force_sig(signr, tsk); 159 return; 160 161 kernel_trap: 162 if (!fixup_exception(regs)) { 163 tsk->thread.error_code = error_code; 164 tsk->thread.trap_nr = trapnr; 165 die(str, regs, error_code); 166 } 167 return; 168 169 #ifdef CONFIG_X86_32 170 vm86_trap: 171 if (handle_vm86_trap((struct kernel_vm86_regs *) regs, 172 error_code, trapnr)) 173 goto trap_signal; 174 return; 175 #endif 176 } 177 178 #define DO_ERROR(trapnr, signr, str, name) \ 179 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 180 { \ 181 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 182 == NOTIFY_STOP) \ 183 return; \ 184 conditional_sti(regs); \ 185 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 186 } 187 188 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 189 dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 190 { \ 191 siginfo_t info; \ 192 info.si_signo = signr; \ 193 info.si_errno = 0; \ 194 info.si_code = sicode; \ 195 info.si_addr = (void __user *)siaddr; \ 196 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 197 == NOTIFY_STOP) \ 198 return; \ 199 conditional_sti(regs); \ 200 do_trap(trapnr, signr, str, regs, error_code, &info); \ 201 } 202 203 DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 204 regs->ip) 205 DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) 206 DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) 207 DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, 208 regs->ip) 209 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", 210 coprocessor_segment_overrun) 211 DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) 212 DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) 213 #ifdef CONFIG_X86_32 214 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) 215 #endif 216 DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, 217 BUS_ADRALN, 0) 218 219 #ifdef CONFIG_X86_64 220 /* Runs on IST stack */ 221 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 222 { 223 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 224 X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) 225 return; 226 preempt_conditional_sti(regs); 227 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 228 preempt_conditional_cli(regs); 229 } 230 231 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 232 { 233 static const char str[] = "double fault"; 234 struct task_struct *tsk = current; 235 236 /* Return not checked because double check cannot be ignored */ 237 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 238 239 tsk->thread.error_code = error_code; 240 tsk->thread.trap_nr = X86_TRAP_DF; 241 242 /* 243 * This is always a kernel trap and never fixable (and thus must 244 * never return). 245 */ 246 for (;;) 247 die(str, regs, error_code); 248 } 249 #endif 250 251 dotraplinkage void __kprobes 252 do_general_protection(struct pt_regs *regs, long error_code) 253 { 254 struct task_struct *tsk; 255 256 conditional_sti(regs); 257 258 #ifdef CONFIG_X86_32 259 if (regs->flags & X86_VM_MASK) 260 goto gp_in_vm86; 261 #endif 262 263 tsk = current; 264 if (!user_mode(regs)) 265 goto gp_in_kernel; 266 267 tsk->thread.error_code = error_code; 268 tsk->thread.trap_nr = X86_TRAP_GP; 269 270 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 271 printk_ratelimit()) { 272 printk(KERN_INFO 273 "%s[%d] general protection ip:%lx sp:%lx error:%lx", 274 tsk->comm, task_pid_nr(tsk), 275 regs->ip, regs->sp, error_code); 276 print_vma_addr(" in ", regs->ip); 277 printk("\n"); 278 } 279 280 force_sig(SIGSEGV, tsk); 281 return; 282 283 #ifdef CONFIG_X86_32 284 gp_in_vm86: 285 local_irq_enable(); 286 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); 287 return; 288 #endif 289 290 gp_in_kernel: 291 if (fixup_exception(regs)) 292 return; 293 294 tsk->thread.error_code = error_code; 295 tsk->thread.trap_nr = X86_TRAP_GP; 296 if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 297 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP) 298 return; 299 die("general protection fault", regs, error_code); 300 } 301 302 /* May run on IST stack. */ 303 dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 304 { 305 #ifdef CONFIG_DYNAMIC_FTRACE 306 /* 307 * ftrace must be first, everything else may cause a recursive crash. 308 * See note by declaration of modifying_ftrace_code in ftrace.c 309 */ 310 if (unlikely(atomic_read(&modifying_ftrace_code)) && 311 ftrace_int3_handler(regs)) 312 return; 313 #endif 314 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 315 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 316 SIGTRAP) == NOTIFY_STOP) 317 return; 318 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 319 320 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 321 SIGTRAP) == NOTIFY_STOP) 322 return; 323 324 /* 325 * Let others (NMI) know that the debug stack is in use 326 * as we may switch to the interrupt stack. 327 */ 328 debug_stack_usage_inc(); 329 preempt_conditional_sti(regs); 330 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 331 preempt_conditional_cli(regs); 332 debug_stack_usage_dec(); 333 } 334 335 #ifdef CONFIG_X86_64 336 /* 337 * Help handler running on IST stack to switch back to user stack 338 * for scheduling or signal handling. The actual stack switch is done in 339 * entry.S 340 */ 341 asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 342 { 343 struct pt_regs *regs = eregs; 344 /* Did already sync */ 345 if (eregs == (struct pt_regs *)eregs->sp) 346 ; 347 /* Exception from user space */ 348 else if (user_mode(eregs)) 349 regs = task_pt_regs(current); 350 /* 351 * Exception from kernel and interrupts are enabled. Move to 352 * kernel process stack. 353 */ 354 else if (eregs->flags & X86_EFLAGS_IF) 355 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 356 if (eregs != regs) 357 *regs = *eregs; 358 return regs; 359 } 360 #endif 361 362 /* 363 * Our handling of the processor debug registers is non-trivial. 364 * We do not clear them on entry and exit from the kernel. Therefore 365 * it is possible to get a watchpoint trap here from inside the kernel. 366 * However, the code in ./ptrace.c has ensured that the user can 367 * only set watchpoints on userspace addresses. Therefore the in-kernel 368 * watchpoint trap can only occur in code which is reading/writing 369 * from user space. Such code must not hold kernel locks (since it 370 * can equally take a page fault), therefore it is safe to call 371 * force_sig_info even though that claims and releases locks. 372 * 373 * Code in ./signal.c ensures that the debug control register 374 * is restored before we deliver any signal, and therefore that 375 * user code runs with the correct debug control register even though 376 * we clear it here. 377 * 378 * Being careful here means that we don't have to be as careful in a 379 * lot of more complicated places (task switching can be a bit lazy 380 * about restoring all the debug state, and ptrace doesn't have to 381 * find every occurrence of the TF bit that could be saved away even 382 * by user code) 383 * 384 * May run on IST stack. 385 */ 386 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 387 { 388 struct task_struct *tsk = current; 389 int user_icebp = 0; 390 unsigned long dr6; 391 int si_code; 392 393 get_debugreg(dr6, 6); 394 395 /* Filter out all the reserved bits which are preset to 1 */ 396 dr6 &= ~DR6_RESERVED; 397 398 /* 399 * If dr6 has no reason to give us about the origin of this trap, 400 * then it's very likely the result of an icebp/int01 trap. 401 * User wants a sigtrap for that. 402 */ 403 if (!dr6 && user_mode(regs)) 404 user_icebp = 1; 405 406 /* Catch kmemcheck conditions first of all! */ 407 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) 408 return; 409 410 /* DR6 may or may not be cleared by the CPU */ 411 set_debugreg(0, 6); 412 413 /* 414 * The processor cleared BTF, so don't mark that we need it set. 415 */ 416 clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); 417 418 /* Store the virtualized DR6 value */ 419 tsk->thread.debugreg6 = dr6; 420 421 if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, 422 SIGTRAP) == NOTIFY_STOP) 423 return; 424 425 /* 426 * Let others (NMI) know that the debug stack is in use 427 * as we may switch to the interrupt stack. 428 */ 429 debug_stack_usage_inc(); 430 431 /* It's safe to allow irq's after DR6 has been saved */ 432 preempt_conditional_sti(regs); 433 434 if (regs->flags & X86_VM_MASK) { 435 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 436 X86_TRAP_DB); 437 preempt_conditional_cli(regs); 438 debug_stack_usage_dec(); 439 return; 440 } 441 442 /* 443 * Single-stepping through system calls: ignore any exceptions in 444 * kernel space, but re-enable TF when returning to user mode. 445 * 446 * We already checked v86 mode above, so we can check for kernel mode 447 * by just checking the CPL of CS. 448 */ 449 if ((dr6 & DR_STEP) && !user_mode(regs)) { 450 tsk->thread.debugreg6 &= ~DR_STEP; 451 set_tsk_thread_flag(tsk, TIF_SINGLESTEP); 452 regs->flags &= ~X86_EFLAGS_TF; 453 } 454 si_code = get_si_code(tsk->thread.debugreg6); 455 if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) 456 send_sigtrap(tsk, regs, error_code, si_code); 457 preempt_conditional_cli(regs); 458 debug_stack_usage_dec(); 459 460 return; 461 } 462 463 /* 464 * Note that we play around with the 'TS' bit in an attempt to get 465 * the correct behaviour even in the presence of the asynchronous 466 * IRQ13 behaviour 467 */ 468 void math_error(struct pt_regs *regs, int error_code, int trapnr) 469 { 470 struct task_struct *task = current; 471 siginfo_t info; 472 unsigned short err; 473 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : 474 "simd exception"; 475 476 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 477 return; 478 conditional_sti(regs); 479 480 if (!user_mode_vm(regs)) 481 { 482 if (!fixup_exception(regs)) { 483 task->thread.error_code = error_code; 484 task->thread.trap_nr = trapnr; 485 die(str, regs, error_code); 486 } 487 return; 488 } 489 490 /* 491 * Save the info for the exception handler and clear the error. 492 */ 493 save_init_fpu(task); 494 task->thread.trap_nr = trapnr; 495 task->thread.error_code = error_code; 496 info.si_signo = SIGFPE; 497 info.si_errno = 0; 498 info.si_addr = (void __user *)regs->ip; 499 if (trapnr == X86_TRAP_MF) { 500 unsigned short cwd, swd; 501 /* 502 * (~cwd & swd) will mask out exceptions that are not set to unmasked 503 * status. 0x3f is the exception bits in these regs, 0x200 is the 504 * C1 reg you need in case of a stack fault, 0x040 is the stack 505 * fault bit. We should only be taking one exception at a time, 506 * so if this combination doesn't produce any single exception, 507 * then we have a bad program that isn't synchronizing its FPU usage 508 * and it will suffer the consequences since we won't be able to 509 * fully reproduce the context of the exception 510 */ 511 cwd = get_fpu_cwd(task); 512 swd = get_fpu_swd(task); 513 514 err = swd & ~cwd; 515 } else { 516 /* 517 * The SIMD FPU exceptions are handled a little differently, as there 518 * is only a single status/control register. Thus, to determine which 519 * unmasked exception was caught we must mask the exception mask bits 520 * at 0x1f80, and then use these to mask the exception bits at 0x3f. 521 */ 522 unsigned short mxcsr = get_fpu_mxcsr(task); 523 err = ~(mxcsr >> 7) & mxcsr; 524 } 525 526 if (err & 0x001) { /* Invalid op */ 527 /* 528 * swd & 0x240 == 0x040: Stack Underflow 529 * swd & 0x240 == 0x240: Stack Overflow 530 * User must clear the SF bit (0x40) if set 531 */ 532 info.si_code = FPE_FLTINV; 533 } else if (err & 0x004) { /* Divide by Zero */ 534 info.si_code = FPE_FLTDIV; 535 } else if (err & 0x008) { /* Overflow */ 536 info.si_code = FPE_FLTOVF; 537 } else if (err & 0x012) { /* Denormal, Underflow */ 538 info.si_code = FPE_FLTUND; 539 } else if (err & 0x020) { /* Precision */ 540 info.si_code = FPE_FLTRES; 541 } else { 542 /* 543 * If we're using IRQ 13, or supposedly even some trap 544 * X86_TRAP_MF implementations, it's possible 545 * we get a spurious trap, which is not an error. 546 */ 547 return; 548 } 549 force_sig_info(SIGFPE, &info, task); 550 } 551 552 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 553 { 554 #ifdef CONFIG_X86_32 555 ignore_fpu_irq = 1; 556 #endif 557 558 math_error(regs, error_code, X86_TRAP_MF); 559 } 560 561 dotraplinkage void 562 do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 563 { 564 math_error(regs, error_code, X86_TRAP_XF); 565 } 566 567 dotraplinkage void 568 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) 569 { 570 conditional_sti(regs); 571 #if 0 572 /* No need to warn about this any longer. */ 573 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 574 #endif 575 } 576 577 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 578 { 579 } 580 581 asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 582 { 583 } 584 585 /* 586 * 'math_state_restore()' saves the current math information in the 587 * old math state array, and gets the new ones from the current task 588 * 589 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 590 * Don't touch unless you *really* know how it works. 591 * 592 * Must be called with kernel preemption disabled (eg with local 593 * local interrupts as in the case of do_device_not_available). 594 */ 595 void math_state_restore(void) 596 { 597 struct task_struct *tsk = current; 598 599 if (!tsk_used_math(tsk)) { 600 local_irq_enable(); 601 /* 602 * does a slab alloc which can sleep 603 */ 604 if (init_fpu(tsk)) { 605 /* 606 * ran out of memory! 607 */ 608 do_group_exit(SIGKILL); 609 return; 610 } 611 local_irq_disable(); 612 } 613 614 __thread_fpu_begin(tsk); 615 /* 616 * Paranoid restore. send a SIGSEGV if we fail to restore the state. 617 */ 618 if (unlikely(restore_fpu_checking(tsk))) { 619 __thread_fpu_end(tsk); 620 force_sig(SIGSEGV, tsk); 621 return; 622 } 623 624 tsk->fpu_counter++; 625 } 626 EXPORT_SYMBOL_GPL(math_state_restore); 627 628 dotraplinkage void __kprobes 629 do_device_not_available(struct pt_regs *regs, long error_code) 630 { 631 #ifdef CONFIG_MATH_EMULATION 632 if (read_cr0() & X86_CR0_EM) { 633 struct math_emu_info info = { }; 634 635 conditional_sti(regs); 636 637 info.regs = regs; 638 math_emulate(&info); 639 return; 640 } 641 #endif 642 math_state_restore(); /* interrupts still off */ 643 #ifdef CONFIG_X86_32 644 conditional_sti(regs); 645 #endif 646 } 647 648 #ifdef CONFIG_X86_32 649 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 650 { 651 siginfo_t info; 652 local_irq_enable(); 653 654 info.si_signo = SIGILL; 655 info.si_errno = 0; 656 info.si_code = ILL_BADSTK; 657 info.si_addr = NULL; 658 if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 659 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) 660 return; 661 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 662 &info); 663 } 664 #endif 665 666 /* Set of traps needed for early debugging. */ 667 void __init early_trap_init(void) 668 { 669 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); 670 /* int3 can be called from all */ 671 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); 672 set_intr_gate(X86_TRAP_PF, &page_fault); 673 load_idt(&idt_descr); 674 } 675 676 void __init trap_init(void) 677 { 678 int i; 679 680 #ifdef CONFIG_EISA 681 void __iomem *p = early_ioremap(0x0FFFD9, 4); 682 683 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) 684 EISA_bus = 1; 685 early_iounmap(p, 4); 686 #endif 687 688 set_intr_gate(X86_TRAP_DE, ÷_error); 689 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); 690 /* int4 can be called from all */ 691 set_system_intr_gate(X86_TRAP_OF, &overflow); 692 set_intr_gate(X86_TRAP_BR, &bounds); 693 set_intr_gate(X86_TRAP_UD, &invalid_op); 694 set_intr_gate(X86_TRAP_NM, &device_not_available); 695 #ifdef CONFIG_X86_32 696 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); 697 #else 698 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); 699 #endif 700 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); 701 set_intr_gate(X86_TRAP_TS, &invalid_TSS); 702 set_intr_gate(X86_TRAP_NP, &segment_not_present); 703 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 704 set_intr_gate(X86_TRAP_GP, &general_protection); 705 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); 706 set_intr_gate(X86_TRAP_MF, &coprocessor_error); 707 set_intr_gate(X86_TRAP_AC, &alignment_check); 708 #ifdef CONFIG_X86_MCE 709 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); 710 #endif 711 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); 712 713 /* Reserve all the builtin and the syscall vector: */ 714 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 715 set_bit(i, used_vectors); 716 717 #ifdef CONFIG_IA32_EMULATION 718 set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); 719 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 720 #endif 721 722 #ifdef CONFIG_X86_32 723 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 724 set_bit(SYSCALL_VECTOR, used_vectors); 725 #endif 726 727 /* 728 * Should be a barrier for any external CPU state: 729 */ 730 cpu_init(); 731 732 x86_init.irqs.trap_init(); 733 734 #ifdef CONFIG_X86_64 735 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 736 set_nmi_gate(X86_TRAP_DB, &debug); 737 set_nmi_gate(X86_TRAP_BP, &int3); 738 #endif 739 } 740