/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.
	 * die() gives the process no chance to handle the signal and
	 * notice the kernel fault information, so that won't result in
	 * polluting the information about previously queued, but not
	 * yet delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}

DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
		regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
		regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
		coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
		BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;
	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
							SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		return;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

	return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#endif

	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		__thread_fpu_end(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
		&info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
	set_intr_gate(X86_TRAP_PF, &page_fault);
	load_idt(&idt_descr);
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, &divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, &bounds);
	set_intr_gate(X86_TRAP_UD, &invalid_op);
	set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
	set_intr_gate(X86_TRAP_NP, &segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, &general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
	set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}