/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <linux/err.h>

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm
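
/*
 * Note: bit 9 of the flags word tested by TRACE_IRQS_FLAGS above is IF
 * (X86_EFLAGS_IF), so the macro emits a TRACE_IRQS_ON only when the
 * saved flags show interrupts enabled.
 */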

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettime() fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is
 * because IRET deals with non-canonical addresses better. SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
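
/*
 * For illustration only (user-space code, not part of this file), a
 * typical sequence that enters here is a write(2) call:
 *
 *	movl	$1, %eax		# __NR_write on x86-64
 *	movl	$1, %edi		# fd  = stdout
 *	leaq	msg(%rip), %rsi		# buf (hypothetical symbol)
 *	movl	$14, %edx		# count, assuming a 14-byte msg
 *	syscall				# rcx := rip, r11 := rflags
 */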

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
	UNWIND_HINT_REGS extra=0
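
	/*
	 * Sketch of the frame just built (the stack grows down); offsets
	 * follow the pt_regs layout assumed throughout this file:
	 *
	 *	SS, RSP, RFLAGS, CS, RIP	hand-built "hardware" iret frame
	 *	orig_ax				syscall number (from RAX)
	 *	di si dx cx(=user RCX) ax(=-ENOSYS) r8 r9 r10 r11
	 *	bp bx r12-r15			allocated but not written;
	 *					%rsp points below this hole
	 */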

	TRACE_IRQS_OFF

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall. If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path. If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	addq	$6*8, %rsp			/* skip extra regs -- they were preserved */
	UNWIND_HINT_EMPTY
	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path. Calling
	 * raise(3) will trigger this, for example. IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context. If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11			/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match the most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
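
	/*
	 * Worked example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT
	 * == 47, so the shift count is 16): the shl/sar pair above
	 * sign-extends bit 47 into bits 48-63. A canonical RCX such as
	 * 0x00007fffffffe000 comes back unchanged, while a non-canonical
	 * 0x0000800000000000 becomes 0xffff800000000000, so the cmpq
	 * below sees the difference and bails to the IRET path.
	 */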

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq	%r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
.Lpop_c_regs_except_rcx_r11_and_sysret:
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi
	popq	%rdi
	movq	RSP-ORIG_RAX(%rsp), %rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path. If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	UNWIND_HINT_REGS extra=0
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	UNWIND_HINT_FUNC
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym)	ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual)	__SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>
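
/*
 * Illustrative expansion, assuming the 64-bit syscall table marks
 * execve (nr 59) with the "ptregs" qualifier: the table entry
 *	__SYSCALL_64(59, sys_execve, ptregs)
 * becomes "ptregs_stub sys_execve", which emits ENTRY(ptregs_sys_execve)
 * loading &sys_execve into RAX and jumping to stub_ptregs_64 above.
 */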

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
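
/*
 * Worked example of the stub encoding: for vector 0x31, ~vector+0x80 is
 * 0x4e, which fits in a signed byte, so the pushq encodes in two bytes
 * and each stub (push + jmp) fits in its 8-byte slot. common_interrupt
 * then adds -0x80, turning the value back into ~vector (-0x32 here),
 * and do_IRQ recovers the vector by complementing it once more.
 */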

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushfq
	testl	$X86_EFLAGS_IF, (%rsp)
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	addq	$8, %rsp
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage. This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack. For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here. Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm
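
/*
 * Worked example of the irq_count dance above: irq_count rests at -1
 * while the IRQ stack is unused. The first ENTER_IRQ_STACK increments
 * it to 0 (ZF set), so we fall through, record old_rsp in the top word
 * of the IRQ stack and switch %rsp over. A nested entry increments it
 * to 1 (ZF clear) and only pushes old_rsp, since %rsp is already on
 * the IRQ stack.
 */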

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0 here means we still claim
	 * the IRQ stack even though we're no longer on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode. Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

	/*
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode. There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	SWAPGS
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp			/* skip regs->orig_ax */
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
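
	/*
	 * Note on the CONFIG_PREEMPT loop above: we only reschedule if
	 * the interrupted kernel context had interrupts enabled (IF set
	 * in the saved EFLAGS) and __preempt_count is zero; after
	 * preempt_schedule_irq returns we re-check __preempt_count and
	 * repeat until no further reschedule is needed.
	 */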

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp			/* skip regs->orig_ax */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	popq	%rdi				/* Restore user RDI */

	/*
	 * espfix_stack[31:16] == 0. The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X. That is, there are 65536 RO aliases of
	 * the same page. Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
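
	/*
	 * Illustrative arithmetic (addresses invented for the example):
	 * if the user RSP was 0x00007ffd1234abcd, the andl above leaves
	 * RAX == 0x12340000, and ORing in espfix_stack below produces an
	 * RSP whose bits 31:16 are 0x1234 yet which still points into
	 * the read-only ESPFIX alias of the stack we just wrote.
	 */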
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values. We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif
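
/*
 * For example, the "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt
 * smp_apic_timer_interrupt" line above emits an ENTRY(apic_timer_interrupt)
 * stub in .irqentry.text that pushes $~LOCAL_TIMER_VECTOR, runs the common
 * "interrupt" machinery to call smp_apic_timer_interrupt with a pt_regs
 * pointer, and returns through ret_from_intr.
 */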

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x)	PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace. Switch stacks and treat it
	 * as a normal entry. This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm
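
/*
 * For example, the "idtentry divide_error do_divide_error
 * has_error_code=0" line below expands to an ENTRY(divide_error) that
 * pushes $-1 as ORIG_RAX (this vector supplies no error code), builds
 * pt_regs via error_entry, calls do_divide_error(regs, 0) and leaves
 * through error_exit.
 */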

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi: new selector
	 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous
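
/*
 * Flow of the fixup above: if "movl %edi, %gs" at .Lgs_change faults
 * (e.g. on a bad selector), the exception table entry redirects the
 * fault to bad_gs, which swaps back to user gs and loads a null
 * selector instead (or __USER_DS on X86_BUG_NULL_SEG parts), then
 * resumes at label 2.
 */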

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
idtentry xenint3		do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use a slow but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)
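
/*
 * Why the sign test in paranoid_entry works: rdmsr returns the high
 * half of MSR_GS_BASE in %edx. A kernel GSBASE is a kernel-space
 * address with the high bits set, so %edx is negative; a user GSBASE
 * (or zero) yields a non-negative %edx and we know a SWAPGS is needed.
 */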

/*
 * "Paranoid" exit path from exception stack. This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated. Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
.Lparanoid_exit_restore:
	jmp	restore_regs_and_return_to_kernel
END(paranoid_exit)
1124905a36a2SIngo Molnar
1125905a36a2SIngo Molnar/*
1126905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. This is invoked
1127905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came
1128905a36a2SIngo Molnar * from kernel space.
1129905a36a2SIngo Molnar *
1130905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early
1131905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would
1132905a36a2SIngo Molnar * be complicated. Fortunately, there's no good reason
1133905a36a2SIngo Molnar * to try to handle preemption here.
11344d732138SIngo Molnar *
11354d732138SIngo Molnar * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1136905a36a2SIngo Molnar */
1137905a36a2SIngo MolnarENTRY(paranoid_exit)
11388c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
11392140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1140905a36a2SIngo Molnar TRACE_IRQS_OFF_DEBUG
1141905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1142e5317832SAndy Lutomirski jnz .Lparanoid_exit_no_swapgs
1143905a36a2SIngo Molnar TRACE_IRQS_IRETQ
1144905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1145e5317832SAndy Lutomirski jmp .Lparanoid_exit_restore
1146e5317832SAndy Lutomirski.Lparanoid_exit_no_swapgs:
1147905a36a2SIngo Molnar TRACE_IRQS_IRETQ_DEBUG
1148e5317832SAndy Lutomirski.Lparanoid_exit_restore:
1149e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel
1150905a36a2SIngo MolnarEND(paranoid_exit)
1151905a36a2SIngo Molnar
1152905a36a2SIngo Molnar/*
1153905a36a2SIngo Molnar * Save all registers in pt_regs, and switch gs if needed.
1154539f5113SAndy Lutomirski * Return: EBX=0: came from user mode; EBX=1: otherwise
1155905a36a2SIngo Molnar */
1156905a36a2SIngo MolnarENTRY(error_entry)
11578c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC
1158905a36a2SIngo Molnar cld
1159905a36a2SIngo Molnar SAVE_C_REGS 8
1160905a36a2SIngo Molnar SAVE_EXTRA_REGS 8
1161946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 8
1162905a36a2SIngo Molnar xorl %ebx, %ebx
1163905a36a2SIngo Molnar testb $3, CS+8(%rsp)
1164cb6f64edSAndy Lutomirski jz .Lerror_kernelspace
1165539f5113SAndy Lutomirski
1166cb6f64edSAndy Lutomirski /*
1167cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered
1168cb6f64edSAndy Lutomirski * from user mode due to an IRET fault.
1169cb6f64edSAndy Lutomirski */
1170905a36a2SIngo Molnar SWAPGS
1171539f5113SAndy Lutomirski
1172cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs:
1173f1075053SAndy Lutomirski /*
1174f1075053SAndy Lutomirski * We need to tell lockdep that IRQs are off. We can't do this until
1175f1075053SAndy Lutomirski * we fix gsbase, and we should do it before enter_from_user_mode
1176f1075053SAndy Lutomirski * (which can take locks).
1177f1075053SAndy Lutomirski */
1178f1075053SAndy Lutomirski TRACE_IRQS_OFF
1179478dc89cSAndy Lutomirski CALL_enter_from_user_mode
1180f1075053SAndy Lutomirski ret
118102bc7768SAndy Lutomirski
1182cb6f64edSAndy Lutomirski.Lerror_entry_done:
1183905a36a2SIngo Molnar TRACE_IRQS_OFF
1184905a36a2SIngo Molnar ret
1185905a36a2SIngo Molnar
1186905a36a2SIngo Molnar /*
1187905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with
1188905a36a2SIngo Molnar * usergs. Handle them here. B-stepping K8s sometimes report a
1189905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check
1190905a36a2SIngo Molnar * for these here too.
1191905a36a2SIngo Molnar */
1192cb6f64edSAndy Lutomirski.Lerror_kernelspace:
1193905a36a2SIngo Molnar incl %ebx
1194905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx
1195905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp)
1196cb6f64edSAndy Lutomirski je .Lerror_bad_iret
1197905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */
1198905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp)
1199cb6f64edSAndy Lutomirski je .Lbstep_iret
120042c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp)
1201cb6f64edSAndy Lutomirski jne .Lerror_entry_done
1202539f5113SAndy Lutomirski
1203539f5113SAndy Lutomirski /*
120442c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1205539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in
120642c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase.
1207539f5113SAndy Lutomirski */
12082fa5f04fSWanpeng Li SWAPGS
12092fa5f04fSWanpeng Li jmp .Lerror_entry_done
1210905a36a2SIngo Molnar
1211cb6f64edSAndy Lutomirski.Lbstep_iret:
1212905a36a2SIngo Molnar /* Fix truncated RIP */
1213905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp)
1214905a36a2SIngo Molnar /* fall through */
1215905a36a2SIngo Molnar
1216cb6f64edSAndy Lutomirski.Lerror_bad_iret:
1217539f5113SAndy Lutomirski /*
1218539f5113SAndy Lutomirski * We came from an IRET to user mode, so we have user gsbase.
1219539f5113SAndy Lutomirski * Switch to kernel gsbase:
1220539f5113SAndy Lutomirski */
1221905a36a2SIngo Molnar SWAPGS
1222539f5113SAndy Lutomirski
1223539f5113SAndy Lutomirski /*
1224539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs
1225539f5113SAndy Lutomirski * as if we faulted immediately after IRET and clear EBX so that
1226539f5113SAndy Lutomirski * error_exit knows that we will be returning to user mode.
1227539f5113SAndy Lutomirski */
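
/*
 * For reference, fixup_bad_iret() lives in arch/x86/kernel/traps.c
 * and has roughly this shape (a sketch of the prototype from this
 * kernel's era, not a verbatim copy):
 *
 *	struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
 *
 * It relocates the partial pt_regs plus the faulting IRET frame to a
 * safe stack and returns the new pt_regs pointer, which is why %rax
 * becomes the stack pointer below.
 */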
1228905a36a2SIngo Molnar mov %rsp, %rdi
1229905a36a2SIngo Molnar call fixup_bad_iret
1230905a36a2SIngo Molnar mov %rax, %rsp
1231539f5113SAndy Lutomirski decl %ebx
1232cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs
1233905a36a2SIngo MolnarEND(error_entry)
1234905a36a2SIngo Molnar
1235905a36a2SIngo Molnar
1236539f5113SAndy Lutomirski/*
123775ca5b22SNicolas Iooss * On entry, EBX is a "return to kernel mode" flag:
1238539f5113SAndy Lutomirski * 1: already in kernel mode, don't need SWAPGS
1239539f5113SAndy Lutomirski * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1240539f5113SAndy Lutomirski */
1241905a36a2SIngo MolnarENTRY(error_exit)
12428c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
12432140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1244905a36a2SIngo Molnar TRACE_IRQS_OFF
12452140a994SJan Beulich testl %ebx, %ebx
1246905a36a2SIngo Molnar jnz retint_kernel
1247905a36a2SIngo Molnar jmp retint_user
1248905a36a2SIngo MolnarEND(error_exit)
1249905a36a2SIngo Molnar
1250929bacecSAndy Lutomirski/*
1251929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all,
1252929bacecSAndy Lutomirski * so we can use real assembly here.
1253929bacecSAndy Lutomirski */
1254905a36a2SIngo MolnarENTRY(nmi)
12558c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1256929bacecSAndy Lutomirski
1257fc57a7c6SAndy Lutomirski /*
1258905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then
1259905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context.
1260905a36a2SIngo Molnar * This means that we can have nested NMIs where the next
1261905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We
1262905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the
1263905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant
1264905a36a2SIngo Molnar * anyway.
1265905a36a2SIngo Molnar *
1266905a36a2SIngo Molnar * To handle this case we do the following:
1267905a36a2SIngo Molnar * Check a special location on the stack that contains
1268905a36a2SIngo Molnar * a variable that is set when NMIs are executing.
1269905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it
1270905a36a2SIngo Molnar * is an NMI stack.
1271905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI
1272905a36a2SIngo Molnar * stack then:
1273905a36a2SIngo Molnar * o Set the special variable on the stack
12740b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the
12750b22930eSAndy Lutomirski * stack
12760b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack
1277905a36a2SIngo Molnar * o Continue processing the NMI
1278905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack:
12790b22930eSAndy Lutomirski * o Modify the "iret" location to jump to the repeat_nmi
1280905a36a2SIngo Molnar * o Return to the first NMI
1281905a36a2SIngo Molnar *
1282905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable.
1283905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that it is
1284905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was
1285905a36a2SIngo Molnar * a nested NMI that updated the copy interrupt stack frame, a
1286905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second
1287905a36a2SIngo Molnar * NMI.
12889b6e6a83SAndy Lutomirski *
12899b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace
12909b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode
12919b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like
12929b6e6a83SAndy Lutomirski * other IST entries.
1293905a36a2SIngo Molnar */
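
/*
 * A note on the ASM_CLAC below: NMI delivery does not clear EFLAGS.AC,
 * so if user space had executed STAC we would otherwise run the whole
 * handler with SMAP protection suspended.  ASM_CLAC patches itself out
 * on hardware without SMAP.
 */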
1294905a36a2SIngo Molnar
1295e93c1730SAndy Lutomirski ASM_CLAC
1296e93c1730SAndy Lutomirski
1297905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */
1298905a36a2SIngo Molnar pushq %rdx
1299905a36a2SIngo Molnar
13009b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp)
13019b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel
1302905a36a2SIngo Molnar
1303905a36a2SIngo Molnar /*
13049b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we
13059b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and
13069b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end
13079b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs
13089b6e6a83SAndy Lutomirski * are off.
130983c133cfSAndy Lutomirski *
131083c133cfSAndy Lutomirski * We also must not push anything to the stack before switching
131183c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable.
13129b6e6a83SAndy Lutomirski */
13139b6e6a83SAndy Lutomirski
1314929bacecSAndy Lutomirski swapgs
13159b6e6a83SAndy Lutomirski cld
13169b6e6a83SAndy Lutomirski movq %rsp, %rdx
13179b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
13188c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8
13199b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */
13209b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */
13219b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */
13229b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */
13239b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */
13248c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
13259b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */
13269b6e6a83SAndy Lutomirski pushq %rdi /* pt_regs->di */
13279b6e6a83SAndy Lutomirski pushq %rsi /* pt_regs->si */
13289b6e6a83SAndy Lutomirski pushq (%rdx) /* pt_regs->dx */
13299b6e6a83SAndy Lutomirski pushq %rcx /* pt_regs->cx */
13309b6e6a83SAndy Lutomirski pushq %rax /* pt_regs->ax */
13319b6e6a83SAndy Lutomirski pushq %r8 /* pt_regs->r8 */
13329b6e6a83SAndy Lutomirski pushq %r9 /* pt_regs->r9 */
13339b6e6a83SAndy Lutomirski pushq %r10 /* pt_regs->r10 */
13349b6e6a83SAndy Lutomirski pushq %r11 /* pt_regs->r11 */
13359b6e6a83SAndy Lutomirski pushq %rbx /* pt_regs->rbx */
13369b6e6a83SAndy Lutomirski pushq %rbp /* pt_regs->rbp */
13379b6e6a83SAndy Lutomirski pushq %r12 /* pt_regs->r12 */
13389b6e6a83SAndy Lutomirski pushq %r13 /* pt_regs->r13 */
13399b6e6a83SAndy Lutomirski pushq %r14 /* pt_regs->r14 */
13409b6e6a83SAndy Lutomirski pushq %r15 /* pt_regs->r15 */
13418c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
1342946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER
13439b6e6a83SAndy Lutomirski
13449b6e6a83SAndy Lutomirski /*
13459b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage
13469b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're
13479b6e6a83SAndy Lutomirski * done with the NMI stack.
13489b6e6a83SAndy Lutomirski */
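
/*
 * The two movq instructions below just set up the C calling convention
 * for do_nmi, whose declaration in arch/x86/kernel/nmi.c is roughly
 * (paraphrased, not verbatim):
 *
 *	void do_nmi(struct pt_regs *regs, long error_code);
 *
 * %rdi = the pt_regs we just built, %rsi = -1 as a dummy error code.
 */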
13499b6e6a83SAndy Lutomirski
13509b6e6a83SAndy Lutomirski movq %rsp, %rdi
13519b6e6a83SAndy Lutomirski movq $-1, %rsi
13529b6e6a83SAndy Lutomirski call do_nmi
13539b6e6a83SAndy Lutomirski
13549b6e6a83SAndy Lutomirski /*
13559b6e6a83SAndy Lutomirski * Return to user mode. We must *not* do the normal exit
1356946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts.
13579b6e6a83SAndy Lutomirski */
13588a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode
13599b6e6a83SAndy Lutomirski
13609b6e6a83SAndy Lutomirski.Lnmi_from_kernel:
13619b6e6a83SAndy Lutomirski /*
13620b22930eSAndy Lutomirski * Here's what our stack frame will look like:
13630b22930eSAndy Lutomirski * +---------------------------------------------------------+
13640b22930eSAndy Lutomirski * | original SS                                             |
13650b22930eSAndy Lutomirski * | original Return RSP                                     |
13660b22930eSAndy Lutomirski * | original RFLAGS                                         |
13670b22930eSAndy Lutomirski * | original CS                                             |
13680b22930eSAndy Lutomirski * | original RIP                                            |
13690b22930eSAndy Lutomirski * +---------------------------------------------------------+
13700b22930eSAndy Lutomirski * | temp storage for rdx                                    |
13710b22930eSAndy Lutomirski * +---------------------------------------------------------+
13720b22930eSAndy Lutomirski * | "NMI executing" variable                                |
13730b22930eSAndy Lutomirski * +---------------------------------------------------------+
13740b22930eSAndy Lutomirski * | iret SS          } Copied from "outermost" frame        |
13750b22930eSAndy Lutomirski * | iret Return RSP  } on each loop iteration; overwritten  |
13760b22930eSAndy Lutomirski * | iret RFLAGS      } by a nested NMI to force another     |
13770b22930eSAndy Lutomirski * | iret CS          } iteration if needed.                 |
13780b22930eSAndy Lutomirski * | iret RIP         }                                      |
13790b22930eSAndy Lutomirski * +---------------------------------------------------------+
13800b22930eSAndy Lutomirski * | outermost SS          } initialized in first_nmi;       |
13810b22930eSAndy Lutomirski * | outermost Return RSP  } will not be changed before      |
13820b22930eSAndy Lutomirski * | outermost RFLAGS      } NMI processing is done.         |
13830b22930eSAndy Lutomirski * | outermost CS          } Copied to "iret" frame on each  |
13840b22930eSAndy Lutomirski * | outermost RIP         } iteration.                      |
13850b22930eSAndy Lutomirski * +---------------------------------------------------------+
13860b22930eSAndy Lutomirski * | pt_regs                                                 |
13870b22930eSAndy Lutomirski * +---------------------------------------------------------+
13880b22930eSAndy Lutomirski *
13890b22930eSAndy Lutomirski * The "original" frame is used by hardware. Before re-enabling
13900b22930eSAndy Lutomirski * NMIs, we need to be done with it, and we need to leave enough
13910b22930eSAndy Lutomirski * space for the asm code here.
13920b22930eSAndy Lutomirski *
13930b22930eSAndy Lutomirski * We return by executing IRET while RSP points to the "iret" frame.
13940b22930eSAndy Lutomirski * That will either return for real or it will loop back into NMI
13950b22930eSAndy Lutomirski * processing.
13960b22930eSAndy Lutomirski *
13970b22930eSAndy Lutomirski * The "outermost" frame is copied to the "iret" frame on each
13980b22930eSAndy Lutomirski * iteration of the loop, so each iteration starts with the "iret"
13990b22930eSAndy Lutomirski * frame pointing to the final return target.
14000b22930eSAndy Lutomirski */
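
/*
 * The checks below, as compact pseudo-code (an illustrative summary
 * only; the real logic is the assembly that follows):
 *
 *	if (repeat_nmi <= RIP && RIP < end_repeat_nmi)
 *		goto nested_nmi_out;	// outer NMI is mid-copy; resume it
 *	if ("NMI executing" == 1)
 *		goto nested_nmi;
 *	if (RSP is within the NMI stack && EFLAGS.DF is set)
 *		goto nested_nmi;	// outer NMI was between clearing
 *					// "NMI executing" and its IRET
 *	goto first_nmi;
 */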
14010b22930eSAndy Lutomirski
14020b22930eSAndy Lutomirski /*
14030b22930eSAndy Lutomirski * Determine whether we're a nested NMI.
14040b22930eSAndy Lutomirski *
1405a27507caSAndy Lutomirski * If we interrupted kernel code between repeat_nmi and
1406a27507caSAndy Lutomirski * end_repeat_nmi, then we are a nested NMI. We must not
1407a27507caSAndy Lutomirski * modify the "iret" frame because it's being written by
1408a27507caSAndy Lutomirski * the outer NMI. That's okay; the outer NMI handler is
1409a27507caSAndy Lutomirski * about to call do_nmi anyway, so we can just
1410a27507caSAndy Lutomirski * resume the outer NMI.
1411a27507caSAndy Lutomirski */
1412a27507caSAndy Lutomirski
1413a27507caSAndy Lutomirski movq $repeat_nmi, %rdx
1414a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1415a27507caSAndy Lutomirski ja 1f
1416a27507caSAndy Lutomirski movq $end_repeat_nmi, %rdx
1417a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1418a27507caSAndy Lutomirski ja nested_nmi_out
1419a27507caSAndy Lutomirski1:
1420a27507caSAndy Lutomirski
1421a27507caSAndy Lutomirski /*
1422a27507caSAndy Lutomirski * Now check "NMI executing". If it's set, then we're nested.
14230b22930eSAndy Lutomirski * This will not detect if we interrupted an outer NMI just
14240b22930eSAndy Lutomirski * before IRET.
1425905a36a2SIngo Molnar */
1426905a36a2SIngo Molnar cmpl $1, -8(%rsp)
1427905a36a2SIngo Molnar je nested_nmi
1428905a36a2SIngo Molnar
1429905a36a2SIngo Molnar /*
14300b22930eSAndy Lutomirski * Now test if the previous stack was an NMI stack. This covers
14310b22930eSAndy Lutomirski * the case where we interrupt an outer NMI after it clears
1432810bc075SAndy Lutomirski * "NMI executing" but before IRET. We need to be careful, though:
1433810bc075SAndy Lutomirski * there is one case in which RSP could point to the NMI stack
1434810bc075SAndy Lutomirski * despite there being no NMI active: naughty userspace controls
1435810bc075SAndy Lutomirski * RSP at the very beginning of the SYSCALL targets. We can
1436810bc075SAndy Lutomirski * pull a fast one on naughty userspace, though: we program
1437810bc075SAndy Lutomirski * SYSCALL to mask DF, so userspace cannot cause DF to be set
1438810bc075SAndy Lutomirski * if it controls the kernel's RSP. We set DF before we clear
1439810bc075SAndy Lutomirski * "NMI executing".
1440905a36a2SIngo Molnar */
1441905a36a2SIngo Molnar lea 6*8(%rsp), %rdx
1442905a36a2SIngo Molnar /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1443905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp)
1444905a36a2SIngo Molnar /* If the stack pointer is above the NMI stack, this is a normal NMI */
1445905a36a2SIngo Molnar ja first_nmi
14464d732138SIngo Molnar
1447905a36a2SIngo Molnar subq $EXCEPTION_STKSZ, %rdx
1448905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp)
1449905a36a2SIngo Molnar /* If it is below the NMI stack, it is a normal NMI */
1450905a36a2SIngo Molnar jb first_nmi
1451810bc075SAndy Lutomirski
1452810bc075SAndy Lutomirski /* Ah, it is within the NMI stack. */
1453810bc075SAndy Lutomirski
1454810bc075SAndy Lutomirski testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1455810bc075SAndy Lutomirski jz first_nmi /* RSP was user controlled. */
1456810bc075SAndy Lutomirski
1457810bc075SAndy Lutomirski /* This is a nested NMI. */
1458905a36a2SIngo Molnar
1459905a36a2SIngo Molnarnested_nmi:
1460905a36a2SIngo Molnar /*
14610b22930eSAndy Lutomirski * Modify the "iret" frame to point to repeat_nmi, forcing another
14620b22930eSAndy Lutomirski * iteration of NMI handling.
1463905a36a2SIngo Molnar */
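
/*
 * How the rewrite below works (a sketch, derived from the frame
 * diagram above): the nested NMI's hardware frame reused the top of
 * the NMI IST stack, so after the entry "pushq %rdx" and the
 * "subq $8, %rsp", RSP sits at the "NMI executing" slot of the outer
 * NMI's layout.  The five pushes therefore land exactly on the outer
 * "iret" frame, replacing it with SS/RSP/RFLAGS/CS and
 * RIP = repeat_nmi; the leaq computes the RSP value repeat_nmi
 * expects (pointing at "outermost RIP").  "addq $(6*8), %rsp" then
 * undoes the subq plus the five pushes, leaving only the rewritten
 * frame behind.
 */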
146423a781e9SAndy Lutomirski subq $8, %rsp
1465905a36a2SIngo Molnar leaq -10*8(%rsp), %rdx
1466905a36a2SIngo Molnar pushq $__KERNEL_DS
1467905a36a2SIngo Molnar pushq %rdx
1468905a36a2SIngo Molnar pushfq
1469905a36a2SIngo Molnar pushq $__KERNEL_CS
1470905a36a2SIngo Molnar pushq $repeat_nmi
1471905a36a2SIngo Molnar
1472905a36a2SIngo Molnar /* Put stack back */
1473905a36a2SIngo Molnar addq $(6*8), %rsp
1474905a36a2SIngo Molnar
1475905a36a2SIngo Molnarnested_nmi_out:
1476905a36a2SIngo Molnar popq %rdx
1477905a36a2SIngo Molnar
14780b22930eSAndy Lutomirski /* We are returning to kernel mode, so this cannot result in a fault. */
1479929bacecSAndy Lutomirski iretq
1480905a36a2SIngo Molnar
1481905a36a2SIngo Molnarfirst_nmi:
14820b22930eSAndy Lutomirski /* Restore rdx. */
1483905a36a2SIngo Molnar movq (%rsp), %rdx
1484905a36a2SIngo Molnar
148536f1a77bSAndy Lutomirski /* Make room for "NMI executing". */
148636f1a77bSAndy Lutomirski pushq $0
1487905a36a2SIngo Molnar
14880b22930eSAndy Lutomirski /* Leave room for the "iret" frame */
1489905a36a2SIngo Molnar subq $(5*8), %rsp
1490905a36a2SIngo Molnar
14910b22930eSAndy Lutomirski /* Copy the "original" frame to the "outermost" frame */
1492905a36a2SIngo Molnar .rept 5
1493905a36a2SIngo Molnar pushq 11*8(%rsp)
1494905a36a2SIngo Molnar .endr
14958c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1496905a36a2SIngo Molnar
1497905a36a2SIngo Molnar /* Everything up to here is safe from nested NMIs */
1498905a36a2SIngo Molnar
1499a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY
1500a97439aaSAndy Lutomirski /*
1501a97439aaSAndy Lutomirski * For ease of testing, unmask NMIs right away. Disabled by
1502a97439aaSAndy Lutomirski * default because IRET is very expensive.
1503a97439aaSAndy Lutomirski */
1504a97439aaSAndy Lutomirski pushq $0 /* SS */
1505a97439aaSAndy Lutomirski pushq %rsp /* RSP (minus 8 because of the previous push) */
1506a97439aaSAndy Lutomirski addq $8, (%rsp) /* Fix up RSP */
1507a97439aaSAndy Lutomirski pushfq /* RFLAGS */
1508a97439aaSAndy Lutomirski pushq $__KERNEL_CS /* CS */
1509a97439aaSAndy Lutomirski pushq $1f /* RIP */
1510929bacecSAndy Lutomirski iretq /* continues at repeat_nmi below */
15118c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1512a97439aaSAndy Lutomirski1:
1513a97439aaSAndy Lutomirski#endif
1514a97439aaSAndy Lutomirski
15150b22930eSAndy Lutomirskirepeat_nmi:
1516905a36a2SIngo Molnar /*
1517905a36a2SIngo Molnar * If there was a nested NMI, the first NMI's iret will return
1518905a36a2SIngo Molnar * here. But NMIs are still enabled and we can take another
1519905a36a2SIngo Molnar * nested NMI. The nested NMI checks the interrupted RIP to see
1520905a36a2SIngo Molnar * if it is between repeat_nmi and end_repeat_nmi, and if so
1521905a36a2SIngo Molnar * it will just return, as we are about to repeat an NMI anyway.
1522905a36a2SIngo Molnar * This makes it safe to copy to the stack frame that a nested
1523905a36a2SIngo Molnar * NMI will update.
15240b22930eSAndy Lutomirski *
15250b22930eSAndy Lutomirski * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
15260b22930eSAndy Lutomirski * we're repeating an NMI, gsbase has the same value that it had on
15270b22930eSAndy Lutomirski * the first iteration. paranoid_entry will load the kernel
152836f1a77bSAndy Lutomirski * gsbase if needed before we call do_nmi. "NMI executing"
152936f1a77bSAndy Lutomirski * is zero.
1530905a36a2SIngo Molnar */
153136f1a77bSAndy Lutomirski movq $1, 10*8(%rsp) /* Set "NMI executing". */
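
/*
 * Offsets from RSP at this point, per the frame diagram above (each
 * slot is 8 bytes, and RSP points at "outermost RIP"):
 * 0*8 = "outermost RIP" up through 4*8 = "outermost SS", then
 * 5*8 = "iret RIP" up through 9*8 = "iret SS", and 10*8 = the
 * "NMI executing" variable we just set.
 */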
1532905a36a2SIngo Molnar
15330b22930eSAndy Lutomirski /*
15340b22930eSAndy Lutomirski * Copy the "outermost" frame to the "iret" frame. NMIs that nest
15350b22930eSAndy Lutomirski * here must not modify the "iret" frame while we're writing to
15360b22930eSAndy Lutomirski * it or it will end up containing garbage.
15370b22930eSAndy Lutomirski */
1538905a36a2SIngo Molnar addq $(10*8), %rsp
1539905a36a2SIngo Molnar .rept 5
1540905a36a2SIngo Molnar pushq -6*8(%rsp)
1541905a36a2SIngo Molnar .endr
1542905a36a2SIngo Molnar subq $(5*8), %rsp
1543905a36a2SIngo Molnarend_repeat_nmi:
1544905a36a2SIngo Molnar
1545905a36a2SIngo Molnar /*
15460b22930eSAndy Lutomirski * Everything below this point can be preempted by a nested NMI.
15470b22930eSAndy Lutomirski * If this happens, then the inner NMI will change the "iret"
15480b22930eSAndy Lutomirski * frame to point back to repeat_nmi.
1549905a36a2SIngo Molnar */
1550905a36a2SIngo Molnar pushq $-1 /* ORIG_RAX: no syscall to restart */
1551905a36a2SIngo Molnar ALLOC_PT_GPREGS_ON_STACK
1552905a36a2SIngo Molnar
1553905a36a2SIngo Molnar /*
1554905a36a2SIngo Molnar * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1555905a36a2SIngo Molnar * as we should not be calling schedule in NMI context,
1556905a36a2SIngo Molnar * even with normal interrupts enabled. An NMI should not be
1557905a36a2SIngo Molnar * setting NEED_RESCHED or anything that normal interrupts and
1558905a36a2SIngo Molnar * exceptions might do.
1559905a36a2SIngo Molnar */
1560905a36a2SIngo Molnar call paranoid_entry
15618c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
1562905a36a2SIngo Molnar
1563905a36a2SIngo Molnar /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1564905a36a2SIngo Molnar movq %rsp, %rdi
1565905a36a2SIngo Molnar movq $-1, %rsi
1566905a36a2SIngo Molnar call do_nmi
1567905a36a2SIngo Molnar
1568905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1569905a36a2SIngo Molnar jnz nmi_restore
1570905a36a2SIngo Molnarnmi_swapgs:
1571905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1572905a36a2SIngo Molnarnmi_restore:
1573471ee483SAndy Lutomirski POP_EXTRA_REGS
1574471ee483SAndy Lutomirski POP_C_REGS
15750b22930eSAndy Lutomirski
1576471ee483SAndy Lutomirski /*
1577471ee483SAndy Lutomirski * Skip orig_ax and the "outermost" frame so that RSP points at the
1578471ee483SAndy Lutomirski * "iret" frame.
1579471ee483SAndy Lutomirski */
1580471ee483SAndy Lutomirski addq $6*8, %rsp
1581905a36a2SIngo Molnar
1582810bc075SAndy Lutomirski /*
1583810bc075SAndy Lutomirski * Clear "NMI executing". Set DF first so that we can easily
1584810bc075SAndy Lutomirski * distinguish the remaining code between here and IRET from
1585929bacecSAndy Lutomirski * the SYSCALL entry and exit paths.
1586929bacecSAndy Lutomirski *
1587929bacecSAndy Lutomirski * We arguably should just inspect RIP instead, but I (Andy) wrote
1588929bacecSAndy Lutomirski * this code when I had the misapprehension that Xen PV supported
1589929bacecSAndy Lutomirski * NMIs, and Xen PV would break that approach.
1590810bc075SAndy Lutomirski */
1591810bc075SAndy Lutomirski std
1592810bc075SAndy Lutomirski movq $0, 5*8(%rsp) /* clear "NMI executing" */
15930b22930eSAndy Lutomirski
15940b22930eSAndy Lutomirski /*
1595929bacecSAndy Lutomirski * iretq reads the "iret" frame and exits the NMI stack in a
1596929bacecSAndy Lutomirski * single instruction. We are returning to kernel mode, so this
1597929bacecSAndy Lutomirski * cannot result in a fault. Similarly, we don't need to worry
1598929bacecSAndy Lutomirski * about espfix64 on the way back to kernel mode.
15990b22930eSAndy Lutomirski */
1600929bacecSAndy Lutomirski iretq
1601905a36a2SIngo MolnarEND(nmi)
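
/*
 * ignore_sysret below is installed as the 32-bit SYSCALL entry point
 * (MSR_CSTAR) when CONFIG_IA32_EMULATION is disabled, so a stray
 * 32-bit SYSCALL simply returns -ENOSYS; see syscall_init() in
 * arch/x86/kernel/cpu/common.c.
 */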
1602905a36a2SIngo Molnar
1603905a36a2SIngo MolnarENTRY(ignore_sysret)
16048c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY
1605905a36a2SIngo Molnar mov $-ENOSYS, %eax
1606905a36a2SIngo Molnar sysret
1607905a36a2SIngo MolnarEND(ignore_sysret)
16082deb4be2SAndy Lutomirski
16092deb4be2SAndy LutomirskiENTRY(rewind_stack_do_exit)
16108c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC
16112deb4be2SAndy Lutomirski /* Prevent any naive code from trying to unwind to our caller. */
16122deb4be2SAndy Lutomirski xorl %ebp, %ebp
16132deb4be2SAndy Lutomirski
16142deb4be2SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
16158c1f7558SJosh Poimboeuf leaq -PTREGS_SIZE(%rax), %rsp
16168c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
16172deb4be2SAndy Lutomirski
16182deb4be2SAndy Lutomirski call do_exit
16192deb4be2SAndy LutomirskiEND(rewind_stack_do_exit)
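
/*
 * A closing note on rewind_stack_do_exit: it is the oops path's
 * escape hatch.  oops_end() in arch/x86/kernel/dumpstack.c calls it
 * with the signal number in %edi, so that do_exit runs on the thread
 * stack, just below pt_regs, rather than on whatever possibly-corrupt
 * stack the oops happened on.
 */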