/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQS_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <linux/err.h>

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is
 * because it deals with uncanonical addresses better. SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
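/*
 * For illustration only (a sketch, not part of the original file; the
 * label and length are hypothetical, and it assumes __NR_write == 1 as
 * on x86-64), a user-space caller following the convention above might
 * look like:
 *
 *	movq	$1, %rax		# system call number (__NR_write)
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$14, %rdx		# arg2: length
 *	syscall				# rcx := rip, r11 := rflags
 */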
ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
	UNWIND_HINT_REGS extra=0

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall. If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path. If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
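	/*
	 * Illustrative note (a sketch, not in the original; assumes
	 * __NR_write == 1 as on x86-64): sys_call_table is an array of
	 * 8-byte function pointers, so the indirect call above dispatches
	 * to *(sys_call_table + rax*8). rax == 1 would reach the write(2)
	 * implementation, with arg3 already moved from %r10 to %rcx to
	 * match the C calling convention.
	 */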
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	addq	$6*8, %rsp			/* skip extra regs -- they were preserved */
	UNWIND_HINT_EMPTY
	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path. Calling
	 * raise(3) will trigger this, for example. IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context. If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11			/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new
	 * CPUs.
	 *
	 * Change the top bits to match the most significant bit (47th or
	 * 56th bit depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode
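	/*
	 * Worked example of the check above (a sketch, assuming 4-level
	 * paging where __VIRTUAL_MASK_SHIFT == 47): the shl/sar pair
	 * shifts by 16 and sign-extends bit 47 into bits 63:48.
	 * 0x00007fffffffffff is canonical and survives unchanged, while
	 * 0x0000800000000000 becomes 0xffff800000000000, so the cmpq
	 * mismatches and we fall back to the IRET path.
	 */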

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
.Lpop_c_regs_except_rcx_r11_and_sysret:
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	RSP-ORIG_RAX(%rsp), %rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path. If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	UNWIND_HINT_REGS extra=0
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	UNWIND_HINT_FUNC
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym)	ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual)	__SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushfq
	testl	$X86_EFLAGS_IF, (%rsp)
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	addq	$8, %rsp
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
 * flags and puts the old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage. This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack. For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here. Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
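/*
 * Illustrative trace of the irq_count protocol above (a sketch, not in
 * the original; the per-cpu counter starts at -1): the outermost
 * ENTER_IRQ_STACK increments it to 0, so the jnz falls through, the
 * link word is written, and RSP switches to the IRQ stack. A nested
 * ENTER_IRQ_STACK increments 0 -> 1, the jnz is taken, and we merely
 * push old_rsp on the stack we are already on. Each LEAVE_IRQ_STACK
 * pops RSP back and decrements the counter, eventually restoring
 * irq_count == -1, i.e. "IRQ stack free".
 */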
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode. Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

	/*
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode. There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	SWAPGS
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp			/* skip regs->orig_ax */
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp			/* skip regs->orig_ax */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq
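/*
 * Background sketch (not from the original file): espfix64 exists
 * because IRET to a 16-bit stack segment restores only the low 16 bits
 * of RSP, so the leftover high bits would leak kernel stack address
 * bits to user space. The path below arranges to IRET from a stack
 * whose high bits are harmless constants instead.
 */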
#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	popq	%rdi				/* Restore user RDI */

	/*
	 * espfix_stack[31:16] == 0. The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X. That is, there are 65536 RO aliases of
	 * the same page. Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
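	/*
	 * Numeric illustration with hypothetical values (not in the
	 * original): if the user RSP was 0x00000000abcd1234, the andl
	 * above left 0xabcd0000 in RAX; OR-ing in espfix_stack (whose
	 * bits 31:16 are zero) produces an address whose bits 31:16 are
	 * the user's 0xabcd yet which still maps the RO alias.
	 */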
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values. We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif
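/*
 * For illustration (a sketch, not in the original): an `apicinterrupt`
 * line above expands to roughly
 *
 *	ENTRY(error_interrupt)
 *		UNWIND_HINT_IRET_REGS
 *		ASM_CLAC
 *		pushq	$~(ERROR_APIC_VECTOR)
 *		interrupt smp_error_interrupt
 *		jmp	ret_from_intr
 *	END(error_interrupt)
 *
 * wrapped in .pushsection/.popsection so it lands in .irqentry.text.
 */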
/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace. Switch stacks and treat it
	 * as a normal entry. This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0

/*
 * Reload gs selector with exception handling
 * edi: new selector
 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
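/*
 * If the selector load at .Lgs_change faults (e.g. a bad selector
 * raising #GP), the exception-table entry below tells the fault handler
 * to resume execution at bad_gs instead of oopsing.
 */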
	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug		do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3		do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment	do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi		do_nmi			has_error_code=0
idtentry xendebug	do_debug		has_error_code=0
idtentry xenint3	do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use a slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)
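/*
 * Arithmetic behind the sign test above (illustrative, not in the
 * original): rdmsr returns MSR_GS_BASE in edx:eax. A kernel GS base is
 * a kernel-space address with the upper bits set, so edx is negative;
 * a user GS base lives in the lower canonical half, so edx is
 * non-negative and we know a SWAPGS is still needed.
 */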
/*
 * "Paranoid" exit path from exception stack. This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated. Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
.Lparanoid_exit_restore:
	jmp	restore_regs_and_return_to_kernel
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS

.Lerror_entry_from_usermode_after_swapgs:
	/*
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here. B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
	 * gsbase and proceed. We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	jmp	.Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user gsbase.
	 * Switch to kernel gsbase:
	 */
	SWAPGS

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)


/*
 * On entry, EBX is a "return to kernel mode" flag:
 *   1: already in kernel mode, don't need SWAPGS
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
ENTRY(error_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	testl	%ebx, %ebx
	jnz	retint_kernel
	jmp	retint_user
END(error_exit)

/*
 * Runs on exception stack. Xen PV does not go through this path at all,
 * so we can use real assembly here.
 */
ENTRY(nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *   Check a special location on the stack that contains a
	 *   variable that is set when NMIs are executing.
	 *   The interrupted task's stack is also checked to see if it
	 *   is an NMI stack.
	 *   If the variable is not set and the stack is not the NMI
	 *   stack then:
	 *     o Set the special variable on the stack
	 *     o Copy the interrupt frame into an "outermost" location on the
	 *       stack
	 *     o Copy the interrupt frame into an "iret" location on the stack
	 *     o Continue processing the NMI
	 *   If the variable is set or the previous stack is the NMI stack:
	 *     o Modify the "iret" location to jump to the repeat_nmi
	 *     o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction. Similarly, IRET to user mode
	 * can fault. We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode. We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */

	swapgs
	cld
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)			/* pt_regs->ss */
	pushq	4*8(%rdx)			/* pt_regs->rsp */
	pushq	3*8(%rdx)			/* pt_regs->flags */
	pushq	2*8(%rdx)			/* pt_regs->cs */
	pushq	1*8(%rdx)			/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq	$-1				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	(%rdx)				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->rbx */
	pushq	%rbp				/* pt_regs->rbp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/*
	 * Return back to user mode. We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                             |
	 * | original Return RSP                                     |
	 * | original RFLAGS                                         |
	 * | original CS                                             |
	 * | original RIP                                            |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                    |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame        |
	 * | iret Return RSP  } on each loop iteration; overwritten  |
	 * | iret RFLAGS      } by a nested NMI to force another     |
	 * | iret CS          } iteration if needed.                 |
	 * | iret RIP         }                                      |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;       |
	 * | outermost Return RSP  } will not be changed before      |
	 * | outermost RFLAGS      } NMI processing is done.         |
	 * | outermost CS          } Copied to "iret" frame on each  |
	 * | outermost RIP         } iteration.                      |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                 |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware. Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI. We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI. That's okay; the outer NMI handler is
	 * about to call do_nmi anyway, so we can just
	 * resume the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing". If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack. This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET. We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets. We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP. We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi			/* RSP was user controlled. */
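	/*
	 * Arithmetic behind the testb above (illustrative, not in the
	 * original): X86_EFLAGS_DF is 0x0400, so DF sits in bit 2 of the
	 * second byte of RFLAGS; 3*8(%rsp) is the saved RFLAGS (above the
	 * rdx/RIP/CS slots) and the +1 selects that byte, so a one-byte
	 * test checks DF without loading the whole quadword.
	 */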

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away. Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0				/* SS */
	pushq	%rsp				/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)			/* Fix up RSP */
	pushfq					/* RFLAGS */
	pushq	$__KERNEL_CS			/* CS */
	pushq	$1f				/* RIP */
	iretq					/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration. paranoid_entry will load the kernel
	 * gsbase if needed before we call do_nmi. "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)			/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	ALLOC_PT_GPREGS_ON_STACK

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled. An NMI should not be setting NEED_RESCHED
	 * or anything that normal interrupts and exceptions might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	POP_EXTRA_REGS
	POP_C_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the
	 * "iret" frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing". Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)			/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction. We are returning to kernel mode, so this
	 * cannot result in a fault. Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
END(nmi)

ENTRY(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysret
END(ignore_sysret)

ENTRY(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

	call	do_exit
END(rewind_stack_do_exit)