/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <linux/err.h>

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
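
/*
 * TRACE_IRQS_IRETQ tells lockdep that the IRETQ we are about to execute
 * will re-enable interrupts, but only if the saved EFLAGS image in
 * pt_regs has IF (bit 9) set; otherwise the irqs-off state is unchanged.
 */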
.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is
 * because IRET deals with non-canonical addresses better. SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
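
/*
 * Concrete illustration of the convention above (not itself part of the
 * kernel code): a user-space write(1, buf, 14) arrives here with
 * rax == 1 (__NR_write), rdi == 1, rsi == buf, rdx == 14, and SYSCALL
 * itself has already clobbered rcx (return RIP) and r11 (saved RFLAGS).
 */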

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
	UNWIND_HINT_REGS extra=0

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall. If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path. If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
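	/*
	 * Dispatch sketch: for the write(2) example above, %rax == 1,
	 * so this is an indirect call through sys_call_table[1]
	 * (sys_write).
	 */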
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	addq	$6*8, %rsp			/* skip extra regs -- they were preserved */
	UNWIND_HINT_EMPTY
	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path. Calling
	 * raise(3) will trigger this, for example. IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context. If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11			/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new
	 * CPUs.
	 *
	 * Change top bits to match the most significant bit (47th or 56th
	 * bit depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
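
	/*
	 * Worked example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT ==
	 * 47, so the shift count is 16): canonical 0x00007fffffffe000 is
	 * unchanged by shl+sar, while 0x0000800000000000 sign-extends to
	 * 0xffff800000000000 and fails the comparison below.
	 */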

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq	%r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
.Lpop_c_regs_except_rcx_r11_and_sysret:
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	RSP-ORIG_RAX(%rsp), %rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path. If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	UNWIND_HINT_REGS extra=0
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	UNWIND_HINT_FUNC
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym)	ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual)	__SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
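
/*
 * Packing illustration, assuming FIRST_EXTERNAL_VECTOR == 0x20: the
 * first stub pushes $(~0x20 + 0x80) == 0x5f, which encodes as a
 * sign-extended byte, so "pushq imm8" plus "jmp rel32" fits in the
 * 8-byte slot; common_interrupt later undoes the 0x80 bias.
 */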

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage. This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack. For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here. Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, while irq_count == 0 we are still claiming
	 * the IRQ stack even though we're no longer on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
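
/*
 * Example of the irq_count invariant above (illustrative values): off
 * the IRQ stack, irq_count == -1; the outermost ENTER_IRQ_STACK bumps
 * it to 0 and switches stacks; a nested entry bumps it to 1 and only
 * pushes \old_rsp, since we are already on the IRQ stack.
 */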

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld

	testb	$3, CS-ORIG_RAX(%rsp)
	jz	1f
	SWAPGS
	call	switch_to_thread_stack
1:

	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode. There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	SWAPGS
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	popq	%rdi				/* Restore user RDI */

	/*
	 * espfix_stack[31:16] == 0. The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X. That is, there are 65536 RO aliases of
	 * the same page. Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
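	/*
	 * Example with an illustrative user RSP of 0x12345678: only bits
	 * 31:16 (0x1234____) survive the mask above, selecting one of the
	 * 65536 read-only aliases; the OR below fills in the rest of the
	 * alias address from espfix_stack.
	 */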
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values. We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

/*
 * Switch to the thread stack. This is called with the IRET frame and
 * orig_ax on the stack. (That is, RDI..R12 are not on the stack and
 * space has not been allocated for them.)
 */
ENTRY(switch_to_thread_stack)
	UNWIND_HINT_FUNC

	pushq	%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
	ret
END(switch_to_thread_stack)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid < 2
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */
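
	/*
	 * The C handlers follow the dotraplinkage convention
	 * do_sym(struct pt_regs *regs, long error_code), so pass the
	 * error code (or 0) in %rsi:
	 */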
	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid < 2
	/*
	 * Entry from userspace. Switch stacks and treat it
	 * as a normal entry. This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
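
/*
 * Sketch of what one instantiation above expands to (divide_error: no
 * error code, paranoid=0): push $-1 as ORIG_RAX, save registers via
 * error_entry, call do_divide_error(pt_regs, 0), then jmp error_exit.
 */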

	/*
	 * Reload gs selector with exception handling
	 * edi: new selector
	 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)
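
/*
 * Note: ENTER_IRQ_STACK/LEAVE_IRQ_STACK above pass regs=0 because this
 * is a plain function call, not an interrupt: there is no pt_regs on
 * the stack for the regs-based unwind hints to describe.
 */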

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
idtentry xenint3		do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)
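
/*
 * Illustrative flow of the %ebx protocol: a machine check (paranoid=1)
 * taken in user mode runs paranoid_entry with user GSBASE still loaded,
 * so SWAPGS executes and %ebx ends up 0, telling paranoid_exit below to
 * SWAPGS again on the way out; taken in kernel mode, %ebx stays 1 and
 * the extra SWAPGS is skipped.
 */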
1145905a36a2SIngo Molnar/*
1146905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. This is invoked
1147905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came
1148905a36a2SIngo Molnar * from kernel space.
1149905a36a2SIngo Molnar *
1150905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early
1151905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would
1152905a36a2SIngo Molnar * be complicated. Fortunately, there's no good reason
1153905a36a2SIngo Molnar * to try to handle preemption here.
11544d732138SIngo Molnar *
11554d732138SIngo Molnar * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1156905a36a2SIngo Molnar */
1157905a36a2SIngo MolnarENTRY(paranoid_exit)
11588c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
11592140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1160905a36a2SIngo Molnar TRACE_IRQS_OFF_DEBUG
1161905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1162e5317832SAndy Lutomirski jnz .Lparanoid_exit_no_swapgs
1163905a36a2SIngo Molnar TRACE_IRQS_IRETQ
1164905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1165e5317832SAndy Lutomirski jmp .Lparanoid_exit_restore
1166e5317832SAndy Lutomirski.Lparanoid_exit_no_swapgs:
1167905a36a2SIngo Molnar TRACE_IRQS_IRETQ_DEBUG
1168e5317832SAndy Lutomirski.Lparanoid_exit_restore:
1169e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel
1170905a36a2SIngo MolnarEND(paranoid_exit)
1171905a36a2SIngo Molnar
1172905a36a2SIngo Molnar/*
1173905a36a2SIngo Molnar * Save all registers in pt_regs, and switch gs if needed.
1174539f5113SAndy Lutomirski * Return: EBX=0: came from user mode; EBX=1: otherwise
1175905a36a2SIngo Molnar */
1176905a36a2SIngo MolnarENTRY(error_entry)
11778c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC
1178905a36a2SIngo Molnar cld
1179905a36a2SIngo Molnar SAVE_C_REGS 8
1180905a36a2SIngo Molnar SAVE_EXTRA_REGS 8
1181946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 8
1182905a36a2SIngo Molnar xorl %ebx, %ebx
1183905a36a2SIngo Molnar testb $3, CS+8(%rsp)
1184cb6f64edSAndy Lutomirski jz .Lerror_kernelspace
1185539f5113SAndy Lutomirski
1186cb6f64edSAndy Lutomirski /*
1187cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered
1188cb6f64edSAndy Lutomirski * from user mode due to an IRET fault.
1189cb6f64edSAndy Lutomirski */
1190905a36a2SIngo Molnar SWAPGS
1191539f5113SAndy Lutomirski
1192cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs:
1193*7f2590a1SAndy Lutomirski /* Put us onto the real thread stack. */
1194*7f2590a1SAndy Lutomirski popq %r12 /* save return addr in %r12 */
1195*7f2590a1SAndy Lutomirski movq %rsp, %rdi /* arg0 = pt_regs pointer */
1196*7f2590a1SAndy Lutomirski call sync_regs
1197*7f2590a1SAndy Lutomirski movq %rax, %rsp /* switch stack */
1198*7f2590a1SAndy Lutomirski ENCODE_FRAME_POINTER
1199*7f2590a1SAndy Lutomirski pushq %r12
1200*7f2590a1SAndy Lutomirski
1201f1075053SAndy Lutomirski /*
1202f1075053SAndy Lutomirski * We need to tell lockdep that IRQs are off. We can't do this until
1203f1075053SAndy Lutomirski * we fix gsbase, and we should do it before enter_from_user_mode
1204f1075053SAndy Lutomirski * (which can take locks).
1205f1075053SAndy Lutomirski */
1206f1075053SAndy Lutomirski TRACE_IRQS_OFF
1207478dc89cSAndy Lutomirski CALL_enter_from_user_mode
1208f1075053SAndy Lutomirski ret
120902bc7768SAndy Lutomirski
1210cb6f64edSAndy Lutomirski.Lerror_entry_done:
1211905a36a2SIngo Molnar TRACE_IRQS_OFF
1212905a36a2SIngo Molnar ret
1213905a36a2SIngo Molnar
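 /*
  * In the user-mode path above, sync_regs() is the C helper that moves
  * the register frame onto the thread stack.  A minimal sketch of the
  * idea (simplified, not the exact kernel implementation; the helper
  * name thread_stack_regs() is hypothetical):
  *
  *	struct pt_regs *sync_regs(struct pt_regs *eregs)
  *	{
  *		struct pt_regs *regs = thread_stack_regs();
  *
  *		if (regs != eregs)
  *			*regs = *eregs;	// copy the frame over
  *		return regs;		// the asm switches %rsp to this
  *	}
  */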
1214905a36a2SIngo Molnar /*
1215905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with
1216905a36a2SIngo Molnar * usergs. Handle them here. B stepping K8s sometimes report a
1217905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check
1218905a36a2SIngo Molnar * for these here too.
1219905a36a2SIngo Molnar */
1220cb6f64edSAndy Lutomirski.Lerror_kernelspace:
1221905a36a2SIngo Molnar incl %ebx
1222905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx
1223905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp)
1224cb6f64edSAndy Lutomirski je .Lerror_bad_iret
1225905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */
1226905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp)
1227cb6f64edSAndy Lutomirski je .Lbstep_iret
122842c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp)
1229cb6f64edSAndy Lutomirski jne .Lerror_entry_done
1230539f5113SAndy Lutomirski
1231539f5113SAndy Lutomirski /*
123242c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1233539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in
123442c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase.
1235539f5113SAndy Lutomirski */
12362fa5f04fSWanpeng Li SWAPGS
12372fa5f04fSWanpeng Li jmp .Lerror_entry_done
1238905a36a2SIngo Molnar
1239cb6f64edSAndy Lutomirski.Lbstep_iret:
1240905a36a2SIngo Molnar /* Fix truncated RIP */
1241905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp)
1242905a36a2SIngo Molnar /* fall through */
1243905a36a2SIngo Molnar
1244cb6f64edSAndy Lutomirski.Lerror_bad_iret:
1245539f5113SAndy Lutomirski /*
1246539f5113SAndy Lutomirski * We came from an IRET to user mode, so we have user gsbase.
1247539f5113SAndy Lutomirski * Switch to kernel gsbase:
1248539f5113SAndy Lutomirski */
1249905a36a2SIngo Molnar SWAPGS
1250539f5113SAndy Lutomirski
1251539f5113SAndy Lutomirski /*
1252539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs
1253539f5113SAndy Lutomirski * as if we faulted immediately after IRET and clear EBX so that
1254539f5113SAndy Lutomirski * error_exit knows that we will be returning to user mode.
1255539f5113SAndy Lutomirski */
1256905a36a2SIngo Molnar mov %rsp, %rdi
1257905a36a2SIngo Molnar call fixup_bad_iret
1258905a36a2SIngo Molnar mov %rax, %rsp
1259539f5113SAndy Lutomirski decl %ebx
1260cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs
1261905a36a2SIngo MolnarEND(error_entry)
1262905a36a2SIngo Molnar
1263905a36a2SIngo Molnar
1264539f5113SAndy Lutomirski/*
126575ca5b22SNicolas Iooss * On entry, EBX is a "return to kernel mode" flag:
1266539f5113SAndy Lutomirski * 1: already in kernel mode, don't need SWAPGS
1267539f5113SAndy Lutomirski * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1268539f5113SAndy Lutomirski */
1269905a36a2SIngo MolnarENTRY(error_exit)
12708c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
12712140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1272905a36a2SIngo Molnar TRACE_IRQS_OFF
12732140a994SJan Beulich testl %ebx, %ebx
1274905a36a2SIngo Molnar jnz retint_kernel
1275905a36a2SIngo Molnar jmp retint_user
1276905a36a2SIngo MolnarEND(error_exit)
1277905a36a2SIngo Molnar
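/*
 * The EBX handshake between error_entry and error_exit, as a C-style
 * sketch (illustrative control flow only, not actual kernel code):
 *
 *	ebx = error_entry();	// 1: kernel mode, 0: user (or faked user)
 *	handler(regs);		// e.g. do_general_protection
 *	if (ebx)
 *		goto retint_kernel;	// stay on kernel gsbase
 *	else
 *		goto retint_user;	// exit-to-usermode work + SWAPGS
 */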
1278929bacecSAndy Lutomirski/*
1279929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all,
1280929bacecSAndy Lutomirski * so we can use real assembly here.
1281929bacecSAndy Lutomirski */
1282905a36a2SIngo MolnarENTRY(nmi)
12838c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1284929bacecSAndy Lutomirski
1285fc57a7c6SAndy Lutomirski /*
1286905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then
1287905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context.
1288905a36a2SIngo Molnar * This means that we can have nested NMIs where the next
1289905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We
1290905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the
1291905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant
1292905a36a2SIngo Molnar * anyway.
1293905a36a2SIngo Molnar *
1294905a36a2SIngo Molnar * To handle this case we do the following:
1295905a36a2SIngo Molnar * Check a special location on the stack that contains
1296905a36a2SIngo Molnar * a variable that is set when NMIs are executing.
1297905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it
1298905a36a2SIngo Molnar * is an NMI stack.
1299905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI
1300905a36a2SIngo Molnar * stack then:
1301905a36a2SIngo Molnar * o Set the special variable on the stack
13020b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the
13030b22930eSAndy Lutomirski * stack
13040b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack
1305905a36a2SIngo Molnar * o Continue processing the NMI
1306905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack:
13070b22930eSAndy Lutomirski * o Modify the "iret" location to jump to repeat_nmi
1308905a36a2SIngo Molnar * o return back to the first NMI
1309905a36a2SIngo Molnar *
1310905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable.
1311905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that it is
1312905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was
1313905a36a2SIngo Molnar * a nested NMI that updated the copy interrupt stack frame, a
1314905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second
1315905a36a2SIngo Molnar * NMI.
13169b6e6a83SAndy Lutomirski *
13179b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace
13189b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode
13199b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like
13209b6e6a83SAndy Lutomirski * other IST entries.
1321905a36a2SIngo Molnar */
1322905a36a2SIngo Molnar
1323e93c1730SAndy Lutomirski ASM_CLAC
1324e93c1730SAndy Lutomirski
1325905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */
1326905a36a2SIngo Molnar pushq %rdx
1327905a36a2SIngo Molnar
13289b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp)
13299b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel
1330905a36a2SIngo Molnar
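 /*
  * The CS test above is the asm form of user_mode(regs): the low two
  * bits of the saved CS are the RPL, which is 3 for user mode.  Sketch
  * (illustrative):
  *
  *	if ((regs->cs & 3) == 0)
  *		goto nmi_from_kernel;	// needs the nesting dance below
  *	// user-mode NMI: switch to the thread stack and build pt_regs
  */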
1331905a36a2SIngo Molnar /*
13329b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we
13339b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and
13349b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end
13359b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs
13369b6e6a83SAndy Lutomirski * are off.
133783c133cfSAndy Lutomirski *
133883c133cfSAndy Lutomirski * We also must not push anything to the stack before switching
133983c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable.
13409b6e6a83SAndy Lutomirski */
13419b6e6a83SAndy Lutomirski
1342929bacecSAndy Lutomirski swapgs
13439b6e6a83SAndy Lutomirski cld
13449b6e6a83SAndy Lutomirski movq %rsp, %rdx
13459b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
13468c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8
13479b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */
13489b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */
13499b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */
13509b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */
13519b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */
13528c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
13539b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */
13549b6e6a83SAndy Lutomirski pushq %rdi /* pt_regs->di */
13559b6e6a83SAndy Lutomirski pushq %rsi /* pt_regs->si */
13569b6e6a83SAndy Lutomirski pushq (%rdx) /* pt_regs->dx */
13579b6e6a83SAndy Lutomirski pushq %rcx /* pt_regs->cx */
13589b6e6a83SAndy Lutomirski pushq %rax /* pt_regs->ax */
13599b6e6a83SAndy Lutomirski pushq %r8 /* pt_regs->r8 */
13609b6e6a83SAndy Lutomirski pushq %r9 /* pt_regs->r9 */
13619b6e6a83SAndy Lutomirski pushq %r10 /* pt_regs->r10 */
13629b6e6a83SAndy Lutomirski pushq %r11 /* pt_regs->r11 */
13639b6e6a83SAndy Lutomirski pushq %rbx /* pt_regs->rbx */
13649b6e6a83SAndy Lutomirski pushq %rbp /* pt_regs->rbp */
13659b6e6a83SAndy Lutomirski pushq %r12 /* pt_regs->r12 */
13669b6e6a83SAndy Lutomirski pushq %r13 /* pt_regs->r13 */
13679b6e6a83SAndy Lutomirski pushq %r14 /* pt_regs->r14 */
13689b6e6a83SAndy Lutomirski pushq %r15 /* pt_regs->r15 */
13698c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
1370946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER
13719b6e6a83SAndy Lutomirski
13729b6e6a83SAndy Lutomirski /*
13739b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage
13749b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're
13759b6e6a83SAndy Lutomirski * done with the NMI stack.
13769b6e6a83SAndy Lutomirski */
13779b6e6a83SAndy Lutomirski
13789b6e6a83SAndy Lutomirski movq %rsp, %rdi
13799b6e6a83SAndy Lutomirski movq $-1, %rsi
13809b6e6a83SAndy Lutomirski call do_nmi
13819b6e6a83SAndy Lutomirski
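 /*
  * Per the C ABI, the two moves above set up the arguments for do_nmi:
  * %rdi = pt_regs pointer, %rsi = error code.  In C terms (prototype
  * shown for reference; sketch only):
  *
  *	void do_nmi(struct pt_regs *regs, long error_code);
  *
  *	do_nmi(regs, -1);	// -1: no meaningful error code
  */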
13829b6e6a83SAndy Lutomirski /*
13839b6e6a83SAndy Lutomirski * Return back to user mode. We must *not* do the normal exit
1384946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts.
13859b6e6a83SAndy Lutomirski */
13868a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode
13879b6e6a83SAndy Lutomirski
13889b6e6a83SAndy Lutomirski.Lnmi_from_kernel:
13899b6e6a83SAndy Lutomirski /*
13900b22930eSAndy Lutomirski * Here's what our stack frame will look like:
13910b22930eSAndy Lutomirski * +---------------------------------------------------------+
13920b22930eSAndy Lutomirski * | original SS                                             |
13930b22930eSAndy Lutomirski * | original Return RSP                                     |
13940b22930eSAndy Lutomirski * | original RFLAGS                                         |
13950b22930eSAndy Lutomirski * | original CS                                             |
13960b22930eSAndy Lutomirski * | original RIP                                            |
13970b22930eSAndy Lutomirski * +---------------------------------------------------------+
13980b22930eSAndy Lutomirski * | temp storage for rdx                                    |
13990b22930eSAndy Lutomirski * +---------------------------------------------------------+
14000b22930eSAndy Lutomirski * | "NMI executing" variable                                |
14010b22930eSAndy Lutomirski * +---------------------------------------------------------+
14020b22930eSAndy Lutomirski * | iret SS          } Copied from "outermost" frame        |
14030b22930eSAndy Lutomirski * | iret Return RSP  } on each loop iteration; overwritten  |
14040b22930eSAndy Lutomirski * | iret RFLAGS      } by a nested NMI to force another     |
14050b22930eSAndy Lutomirski * | iret CS          } iteration if needed.                 |
14060b22930eSAndy Lutomirski * | iret RIP         }                                      |
14070b22930eSAndy Lutomirski * +---------------------------------------------------------+
14080b22930eSAndy Lutomirski * | outermost SS          } initialized in first_nmi;       |
14090b22930eSAndy Lutomirski * | outermost Return RSP  } will not be changed before      |
14100b22930eSAndy Lutomirski * | outermost RFLAGS      } NMI processing is done.         |
14110b22930eSAndy Lutomirski * | outermost CS          } Copied to "iret" frame on each  |
14120b22930eSAndy Lutomirski * | outermost RIP         } iteration.                      |
14130b22930eSAndy Lutomirski * +---------------------------------------------------------+
14140b22930eSAndy Lutomirski * | pt_regs                                                 |
14150b22930eSAndy Lutomirski * +---------------------------------------------------------+
14160b22930eSAndy Lutomirski *
14170b22930eSAndy Lutomirski * The "original" frame is used by hardware. Before re-enabling
14180b22930eSAndy Lutomirski * NMIs, we need to be done with it, and we need to leave enough
14190b22930eSAndy Lutomirski * space for the asm code here.
14200b22930eSAndy Lutomirski *
14210b22930eSAndy Lutomirski * We return by executing IRET while RSP points to the "iret" frame.
14220b22930eSAndy Lutomirski * That will either return for real or it will loop back into NMI
14230b22930eSAndy Lutomirski * processing.
14240b22930eSAndy Lutomirski *
14250b22930eSAndy Lutomirski * The "outermost" frame is copied to the "iret" frame on each
14260b22930eSAndy Lutomirski * iteration of the loop, so each iteration starts with the "iret"
14270b22930eSAndy Lutomirski * frame pointing to the final return target.
14280b22930eSAndy Lutomirski */
14290b22930eSAndy Lutomirski
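 /*
  * The nesting checks below, compressed into a C-style sketch (helper
  * names are illustrative, not real functions):
  *
  *	if (repeat_nmi <= regs->ip && regs->ip < end_repeat_nmi)
  *		goto nested_nmi_out;	// outer NMI will re-run do_nmi anyway
  *	if (nmi_executing_flag == 1)
  *		goto nested_nmi;	// redirect the "iret" frame
  *	if (on_nmi_stack(regs->sp) && (regs->flags & X86_EFLAGS_DF))
  *		goto nested_nmi;	// raced with the outer NMI's exit
  *	goto first_nmi;
  */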
14300b22930eSAndy Lutomirski /*
14310b22930eSAndy Lutomirski * Determine whether we're a nested NMI.
14320b22930eSAndy Lutomirski *
1433a27507caSAndy Lutomirski * If we interrupted kernel code between repeat_nmi and
1434a27507caSAndy Lutomirski * end_repeat_nmi, then we are a nested NMI. We must not
1435a27507caSAndy Lutomirski * modify the "iret" frame because it's being written by
1436a27507caSAndy Lutomirski * the outer NMI. That's okay; the outer NMI handler is
1437a27507caSAndy Lutomirski * about to call do_nmi anyway, so we can just
1438a27507caSAndy Lutomirski * resume the outer NMI.
1439a27507caSAndy Lutomirski */
1440a27507caSAndy Lutomirski
1441a27507caSAndy Lutomirski movq $repeat_nmi, %rdx
1442a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1443a27507caSAndy Lutomirski ja 1f
1444a27507caSAndy Lutomirski movq $end_repeat_nmi, %rdx
1445a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1446a27507caSAndy Lutomirski ja nested_nmi_out
1447a27507caSAndy Lutomirski1:
1448a27507caSAndy Lutomirski
1449a27507caSAndy Lutomirski /*
1450a27507caSAndy Lutomirski * Now check "NMI executing". If it's set, then we're nested.
14510b22930eSAndy Lutomirski * This will not detect if we interrupted an outer NMI just
14520b22930eSAndy Lutomirski * before IRET.
1453905a36a2SIngo Molnar */
1454905a36a2SIngo Molnar cmpl $1, -8(%rsp)
1455905a36a2SIngo Molnar je nested_nmi
1456905a36a2SIngo Molnar
1457905a36a2SIngo Molnar /*
14580b22930eSAndy Lutomirski * Now test if the previous stack was an NMI stack. This covers
14590b22930eSAndy Lutomirski * the case where we interrupt an outer NMI after it clears
1460810bc075SAndy Lutomirski * "NMI executing" but before IRET. We need to be careful, though:
1461810bc075SAndy Lutomirski * there is one case in which RSP could point to the NMI stack
1462810bc075SAndy Lutomirski * despite there being no NMI active: naughty userspace controls
1463810bc075SAndy Lutomirski * RSP at the very beginning of the SYSCALL targets. We can
1464810bc075SAndy Lutomirski * pull a fast one on naughty userspace, though: we program
1465810bc075SAndy Lutomirski * SYSCALL to mask DF, so userspace cannot cause DF to be set
1466810bc075SAndy Lutomirski * if it controls the kernel's RSP. We set DF before we clear
1467810bc075SAndy Lutomirski * "NMI executing".
1468905a36a2SIngo Molnar */
1469905a36a2SIngo Molnar lea 6*8(%rsp), %rdx
1470905a36a2SIngo Molnar /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1471905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp)
1472905a36a2SIngo Molnar /* If the stack pointer is above the NMI stack, this is a normal NMI */
1473905a36a2SIngo Molnar ja first_nmi
14744d732138SIngo Molnar
1475905a36a2SIngo Molnar subq $EXCEPTION_STKSZ, %rdx
1476905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp)
1477905a36a2SIngo Molnar /* If it is below the NMI stack, it is a normal NMI */
1478905a36a2SIngo Molnar jb first_nmi
1479810bc075SAndy Lutomirski
1480810bc075SAndy Lutomirski /* Ah, it is within the NMI stack. */
1481810bc075SAndy Lutomirski
1482810bc075SAndy Lutomirski testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1483810bc075SAndy Lutomirski jz first_nmi /* RSP was user controlled. */
1484810bc075SAndy Lutomirski
1485810bc075SAndy Lutomirski /* This is a nested NMI. */
1486905a36a2SIngo Molnar
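 /*
  * What nested_nmi below does, in outline (sketch with illustrative
  * field names; the real work is the push sequence in the asm): the
  * "iret" frame is rewritten so that the outer NMI's IRET re-enters
  * at repeat_nmi instead of returning for real:
  *
  *	iret->ss     = __KERNEL_DS;
  *	iret->rsp    = recomputed_kernel_rsp;	// the leaq below
  *	iret->rflags = current_rflags();	// the pushfq below
  *	iret->cs     = __KERNEL_CS;
  *	iret->rip    = (unsigned long)repeat_nmi;
  */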
1487905a36a2SIngo Molnarnested_nmi:
1488905a36a2SIngo Molnar /*
14890b22930eSAndy Lutomirski * Modify the "iret" frame to point to repeat_nmi, forcing another
14900b22930eSAndy Lutomirski * iteration of NMI handling.
1491905a36a2SIngo Molnar */
149223a781e9SAndy Lutomirski subq $8, %rsp
1493905a36a2SIngo Molnar leaq -10*8(%rsp), %rdx
1494905a36a2SIngo Molnar pushq $__KERNEL_DS
1495905a36a2SIngo Molnar pushq %rdx
1496905a36a2SIngo Molnar pushfq
1497905a36a2SIngo Molnar pushq $__KERNEL_CS
1498905a36a2SIngo Molnar pushq $repeat_nmi
1499905a36a2SIngo Molnar
1500905a36a2SIngo Molnar /* Put stack back */
1501905a36a2SIngo Molnar addq $(6*8), %rsp
1502905a36a2SIngo Molnar
1503905a36a2SIngo Molnarnested_nmi_out:
1504905a36a2SIngo Molnar popq %rdx
1505905a36a2SIngo Molnar
15060b22930eSAndy Lutomirski /* We are returning to kernel mode, so this cannot result in a fault. */
1507929bacecSAndy Lutomirski iretq
1508905a36a2SIngo Molnar
1509905a36a2SIngo Molnarfirst_nmi:
15100b22930eSAndy Lutomirski /* Restore rdx. */
1511905a36a2SIngo Molnar movq (%rsp), %rdx
1512905a36a2SIngo Molnar
151336f1a77bSAndy Lutomirski /* Make room for "NMI executing". */
151336f1a77bSAndy Lutomirski pushq $0
1515905a36a2SIngo Molnar
15160b22930eSAndy Lutomirski /* Leave room for the "iret" frame */
1517905a36a2SIngo Molnar subq $(5*8), %rsp
1518905a36a2SIngo Molnar
15190b22930eSAndy Lutomirski /* Copy the "original" frame to the "outermost" frame */
1520905a36a2SIngo Molnar .rept 5
1521905a36a2SIngo Molnar pushq 11*8(%rsp)
1522905a36a2SIngo Molnar .endr
15238c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1524905a36a2SIngo Molnar
1525905a36a2SIngo Molnar /* Everything up to here is safe from nested NMIs */
1526905a36a2SIngo Molnar
1527a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY
1528a97439aaSAndy Lutomirski /*
1529a97439aaSAndy Lutomirski * For ease of testing, unmask NMIs right away. Disabled by
1530a97439aaSAndy Lutomirski * default because IRET is very expensive.
1531a97439aaSAndy Lutomirski */
1532a97439aaSAndy Lutomirski pushq $0 /* SS */
1533a97439aaSAndy Lutomirski pushq %rsp /* RSP (minus 8 because of the previous push) */
1534a97439aaSAndy Lutomirski addq $8, (%rsp) /* Fix up RSP */
1535a97439aaSAndy Lutomirski pushfq /* RFLAGS */
1536a97439aaSAndy Lutomirski pushq $__KERNEL_CS /* CS */
1537a97439aaSAndy Lutomirski pushq $1f /* RIP */
1538929bacecSAndy Lutomirski iretq /* continues at repeat_nmi below */
15398c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1540a97439aaSAndy Lutomirski1:
1541a97439aaSAndy Lutomirski#endif
1542a97439aaSAndy Lutomirski
15430b22930eSAndy Lutomirskirepeat_nmi:
1544905a36a2SIngo Molnar /*
1545905a36a2SIngo Molnar * If there was a nested NMI, the first NMI's iret will return
1546905a36a2SIngo Molnar * here. But NMIs are still enabled and we can take another
1547905a36a2SIngo Molnar * nested NMI. The nested NMI checks the interrupted RIP to see
1548905a36a2SIngo Molnar * if it is between repeat_nmi and end_repeat_nmi, and if so
1549905a36a2SIngo Molnar * it will just return, as we are about to repeat an NMI anyway.
1550905a36a2SIngo Molnar * This makes it safe to copy to the stack frame that a nested
1551905a36a2SIngo Molnar * NMI will update.
15520b22930eSAndy Lutomirski *
15530b22930eSAndy Lutomirski * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
15540b22930eSAndy Lutomirski * we're repeating an NMI, gsbase has the same value that it had on
15550b22930eSAndy Lutomirski * the first iteration. paranoid_entry will load the kernel
155636f1a77bSAndy Lutomirski * gsbase if needed before we call do_nmi. "NMI executing"
155736f1a77bSAndy Lutomirski * is zero.
1558905a36a2SIngo Molnar */
155936f1a77bSAndy Lutomirski movq $1, 10*8(%rsp) /* Set "NMI executing". */
1560905a36a2SIngo Molnar
15610b22930eSAndy Lutomirski /*
15620b22930eSAndy Lutomirski * Copy the "outermost" frame to the "iret" frame. NMIs that nest
15630b22930eSAndy Lutomirski * here must not modify the "iret" frame while we're writing to
15640b22930eSAndy Lutomirski * it or it will end up containing garbage.
15650b22930eSAndy Lutomirski */
1566905a36a2SIngo Molnar addq $(10*8), %rsp
1567905a36a2SIngo Molnar .rept 5
1568905a36a2SIngo Molnar pushq -6*8(%rsp)
1569905a36a2SIngo Molnar .endr
1570905a36a2SIngo Molnar subq $(5*8), %rsp
1571905a36a2SIngo Molnarend_repeat_nmi:
1572905a36a2SIngo Molnar
1573905a36a2SIngo Molnar /*
15740b22930eSAndy Lutomirski * Everything below this point can be preempted by a nested NMI.
15750b22930eSAndy Lutomirski * If this happens, then the inner NMI will change the "iret"
15760b22930eSAndy Lutomirski * frame to point back to repeat_nmi.
1577905a36a2SIngo Molnar */
1578905a36a2SIngo Molnar pushq $-1 /* ORIG_RAX: no syscall to restart */
1579905a36a2SIngo Molnar ALLOC_PT_GPREGS_ON_STACK
1580905a36a2SIngo Molnar
1581905a36a2SIngo Molnar /*
1582905a36a2SIngo Molnar * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1583905a36a2SIngo Molnar * as we should not be calling schedule in NMI context,
1584905a36a2SIngo Molnar * even with normal interrupts enabled. An NMI should not be
1585905a36a2SIngo Molnar * setting NEED_RESCHED or anything that normal interrupts and
1586905a36a2SIngo Molnar * exceptions might do.
1587905a36a2SIngo Molnar */
1588905a36a2SIngo Molnar call paranoid_entry
15898c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
1590905a36a2SIngo Molnar
1591905a36a2SIngo Molnar /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1592905a36a2SIngo Molnar movq %rsp, %rdi
1593905a36a2SIngo Molnar movq $-1, %rsi
1594905a36a2SIngo Molnar call do_nmi
1595905a36a2SIngo Molnar
1596905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1597905a36a2SIngo Molnar jnz nmi_restore
1598905a36a2SIngo Molnarnmi_swapgs:
1599905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1600905a36a2SIngo Molnarnmi_restore:
1601471ee483SAndy Lutomirski POP_EXTRA_REGS
1602471ee483SAndy Lutomirski POP_C_REGS
16030b22930eSAndy Lutomirski
1604471ee483SAndy Lutomirski /*
1605471ee483SAndy Lutomirski * Skip orig_ax and the "outermost" frame to point RSP at the
1606471ee483SAndy Lutomirski * "iret" frame.
1607471ee483SAndy Lutomirski */
1608471ee483SAndy Lutomirski addq $6*8, %rsp
1609905a36a2SIngo Molnar
1610810bc075SAndy Lutomirski /*
1611810bc075SAndy Lutomirski * Clear "NMI executing". Set DF first so that we can easily
1612810bc075SAndy Lutomirski * distinguish the remaining code between here and IRET from
1613929bacecSAndy Lutomirski * the SYSCALL entry and exit paths.
1614929bacecSAndy Lutomirski *
1615929bacecSAndy Lutomirski * We arguably should just inspect RIP instead, but I (Andy) wrote
1616929bacecSAndy Lutomirski * this code when I had the misapprehension that Xen PV supported
1617929bacecSAndy Lutomirski * NMIs, and Xen PV would break that approach.
1618810bc075SAndy Lutomirski */
1619810bc075SAndy Lutomirski std
1620810bc075SAndy Lutomirski movq $0, 5*8(%rsp) /* clear "NMI executing" */
16210b22930eSAndy Lutomirski
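 /*
  * The DF trick above works because the 64-bit SYSCALL entry setup
  * masks DF out of RFLAGS on every syscall, so user space cannot hand
  * the kernel a set DF.  Sketch of the boot-time side (simplified; the
  * real MSR_SYSCALL_MASK value clears additional flags):
  *
  *	wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_DF | ...);
  */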
16220b22930eSAndy Lutomirski /*
1623929bacecSAndy Lutomirski * iretq reads the "iret" frame and exits the NMI stack in a
1624929bacecSAndy Lutomirski * single instruction. We are returning to kernel mode, so this
1625929bacecSAndy Lutomirski * cannot result in a fault. Similarly, we don't need to worry
1626929bacecSAndy Lutomirski * about espfix64 on the way back to kernel mode.
16270b22930eSAndy Lutomirski */
1628929bacecSAndy Lutomirski iretq
1629905a36a2SIngo MolnarEND(nmi)
1630905a36a2SIngo Molnar
1631905a36a2SIngo MolnarENTRY(ignore_sysret)
16328c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY
1633905a36a2SIngo Molnar mov $-ENOSYS, %eax
1634905a36a2SIngo Molnar sysret
1635905a36a2SIngo MolnarEND(ignore_sysret)
16362deb4be2SAndy Lutomirski
16372deb4be2SAndy LutomirskiENTRY(rewind_stack_do_exit)
16388c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC
16392deb4be2SAndy Lutomirski /* Prevent any naive code from trying to unwind to our caller. */
16402deb4be2SAndy Lutomirski xorl %ebp, %ebp
16412deb4be2SAndy Lutomirski
16422deb4be2SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
16438c1f7558SJosh Poimboeuf leaq -PTREGS_SIZE(%rax), %rsp
16448c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
16452deb4be2SAndy Lutomirski
16462deb4be2SAndy Lutomirski call do_exit
16472deb4be2SAndy LutomirskiEND(rewind_stack_do_exit)
1648