/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm
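/*
 * (RFLAGS bit 9 is IF.  "bt $9, \flags" copies that bit into CF, so the
 *  jnc above skips TRACE_IRQS_ON whenever interrupts were off at the
 *  point \flags was saved.)
 */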
.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints.  During that
 * window, if lockdep is enabled, it might jump back into the debug
 * handler outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET.  That is
 * because it deals with non-canonical addresses better.  SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
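/*
 * For illustration only (not kernel code): a minimal user-space sketch
 * of the convention above, assuming the x86-64 Linux ABI where
 * __NR_write == 1 and a hypothetical local "msg" buffer.  Note that a
 * fourth argument would go in r10, not rcx, because SYSCALL itself
 * overwrites rcx with the return RIP:
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd (stdout)
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$14, %rdx		# arg2: count
 *	syscall				# rcx := RIP, r11 := RFLAGS
 *	# result (or -errno) comes back in rax
 */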
	.pushsection .entry_trampoline, "ax"

/*
 * The code in here gets remapped into cpu_entry_area's trampoline.  This means
 * that the assembler and linker have the wrong idea as to where this code
 * lives (and, in fact, it's mapped more than once, so it's not even at a
 * fixed address).  So we can't reference any symbols outside the entry
 * trampoline and expect it to work.
 *
 * Instead, we carefully abuse %rip-relative addressing.
 * _entry_trampoline(%rip) refers to the start of the remapped entry
 * trampoline.  We can thus find cpu_entry_area with this macro:
 */

#define CPU_ENTRY_AREA \
	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)

/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
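/*
 * E.g. "CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA" below addresses
 * the TSS sp1 field of the local CPU's entry area: the %rip-relative
 * reference to _entry_trampoline yields the remapped trampoline's
 * runtime address (this code runs from inside cpu_entry_area), and
 * subtracting the trampoline's offset within the area
 * (CPU_ENTRY_AREA_entry_trampoline) recovers the area's base.
 */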
ENTRY(entry_SYSCALL_64_trampoline)
	UNWIND_HINT_EMPTY
	swapgs

	/* Stash the user RSP. */
	movq	%rsp, RSP_SCRATCH

	/* Note: using %rsp as a scratch reg. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load the top of the task stack into RSP */
	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp

	/* Start building the simulated IRET frame. */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	RSP_SCRATCH			/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */

	/*
	 * x86 lacks a near absolute jump, and we can't jump to the real
	 * entry text with a relative jump.  We could push the target
	 * address and then use retq, but this destroys the pipeline on
	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
	 * spill RDI and restore it in a second-stage trampoline.
	 */
	pushq	%rdi
	movq	$entry_SYSCALL_64_stage2, %rdi
	JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)

	.popsection

ENTRY(entry_SYSCALL_64_stage2)
	UNWIND_HINT_EMPTY
	popq	%rdi
	jmp	entry_SYSCALL_64_after_hwframe
END(entry_SYSCALL_64_stage2)

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/*
	 * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
	 * is not required to switch CR3.
	 */
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
	UNWIND_HINT_REGS extra=0

	TRACE_IRQS_OFF

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path
entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall.  If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
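	/*
	 * sys_call_table is an array of C function pointers indexed by
	 * syscall number: e.g. on x86-64, rax == 1 dispatches to
	 * sys_write(rdi, rsi, rdx).
	 */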
	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
#ifdef CONFIG_RETPOLINE
	movq	sys_call_table(, %rax, 8), %rax
	call	__x86_indirect_thunk_rax
#else
	call	*sys_call_table(, %rax, 8)
#endif
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	addq	$6*8, %rsp	/* skip extra regs -- they were preserved */
	UNWIND_HINT_EMPTY
	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path.  Calling
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64		/* returns with IRQs disabled */

return_from_SYSCALL_64:
	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode
	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
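	/*
	 * Worked example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT
	 * == 47): the shl/sar pair by 16 sign-extends bit 47 into bits
	 * 63:48.  A canonical address such as 0x00007fffffffe000 passes
	 * through unchanged; a non-canonical 0x0000800000000000 becomes
	 * 0xffff800000000000, so the cmpq below sees a mismatch and we
	 * take the IRET path instead.
	 */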
	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq	%r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
.Lpop_c_regs_except_rcx_r11_and_sysret:
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)
ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path.  If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	UNWIND_HINT_REGS extra=0
	jmp	entry_SYSCALL64_slow_path

1:
	JMP_NOSPEC %rax			/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	UNWIND_HINT_FUNC
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym)	ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual)	__SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>
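/*
 * E.g. a syscall table entry tagged "ptregs" (such as sys_fork or
 * sys_execve) resolves to the generated ptregs_sys_fork /
 * ptregs_sys_execve stub, so a fast-path call lands in stub_ptregs_64
 * with RAX pointing at the real C function.
 */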
/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	/* Clobbers %rbx */
	FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
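/*
 * Encoding note: for vectors 0x20..0xff, ~vector + 0x80 falls in
 * [-0x80, 0x5f], so each pushq takes the two-byte imm8 form and the
 * stub fits its 8-byte slot.  common_interrupt later does addq $-0x80
 * to recover ~vector, i.e. -(vector + 1), which the C handler inverts
 * back with ~regs->orig_ax.
 */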
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm
/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
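/*
 * Nesting example: irq_count starts at -1.  The first interrupt bumps
 * it to 0 and falls through to switch RSP to the IRQ stack; a nested
 * interrupt bumps it to 1 and takes the jnz, skipping the switch since
 * we are already on the IRQ stack.  Both paths push old_rsp, so
 * LEAVE_IRQ_STACK restores it with a single popq either way.
 */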
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld

	testb	$3, CS-ORIG_RAX(%rsp)
	jz	1f
	SWAPGS
	call	switch_to_thread_stack
1:

	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)
	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
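	/*
	 * Illustration: if the user RSP was 0x00007ffd1234abcd, RAX now
	 * holds espfix_stack | 0x12340000 -- bits 31:16 of the new RSP
	 * are the user's own, yet the address still resolves to an RO
	 * alias of the ESPFIX stack page.
	 */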
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm
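/*
 * E.g. "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt
 * smp_apic_timer_interrupt" below emits ENTRY(apic_timer_interrupt) in
 * the irqentry section: it pushes $~LOCAL_TIMER_VECTOR, runs the common
 * "interrupt" prologue, calls smp_apic_timer_interrupt(regs), and
 * returns via ret_from_intr.
 */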
#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x)	PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)

/*
 * Switch to the thread stack.  This is called with the IRET frame and
 * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
 * space has not been allocated for them.)
 */
ENTRY(switch_to_thread_stack)
	UNWIND_HINT_FUNC

	pushq	%rdi
	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
	ret
END(switch_to_thread_stack)
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid < 2
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid < 2
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
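/*
 * For instance, "idtentry overflow do_overflow has_error_code=0" above
 * expands to ENTRY(overflow): it pushes a dummy -1 orig_ax (#OF supplies
 * no error code), allocates pt_regs, enters through error_entry
 * (switching stacks if we came from user mode), and calls
 * do_overflow(regs, 0) before jumping to error_exit.
 */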
/*
 * Reload gs selector with exception handling
 * edi:  new selector
 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
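/*
 * If the selector load at .Lgs_change faults (e.g. a bogus selector
 * left by a ptracer), the exception table entry below redirects
 * execution to bad_gs in the .fixup section, which zeroes %gs -- first
 * bouncing it through __USER_DS on CPUs where loading a null selector
 * does not clear the segment base.
 */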
1172905a36a2SIngo Molnar */ 11734d732138SIngo MolnarENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct pt_regs *) */ 11744d732138SIngo Molnar 1175905a36a2SIngo Molnar/* 1176905a36a2SIngo Molnar * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will 1177905a36a2SIngo Molnar * see the correct pointer to the pt_regs 1178905a36a2SIngo Molnar */ 11798c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 11804d732138SIngo Molnar movq %rdi, %rsp /* we don't return, adjust the stack frame */ 11818c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 11821d3e53e8SAndy Lutomirski 11831d3e53e8SAndy Lutomirski ENTER_IRQ_STACK old_rsp=%r10 1184905a36a2SIngo Molnar call xen_evtchn_do_upcall 11851d3e53e8SAndy Lutomirski LEAVE_IRQ_STACK 11861d3e53e8SAndy Lutomirski 1187905a36a2SIngo Molnar#ifndef CONFIG_PREEMPT 1188905a36a2SIngo Molnar call xen_maybe_preempt_hcall 1189905a36a2SIngo Molnar#endif 1190905a36a2SIngo Molnar jmp error_exit 1191905a36a2SIngo MolnarEND(xen_do_hypervisor_callback) 1192905a36a2SIngo Molnar 1193905a36a2SIngo Molnar/* 1194905a36a2SIngo Molnar * Hypervisor uses this for application faults while it executes. 1195905a36a2SIngo Molnar * We get here for two reasons: 1196905a36a2SIngo Molnar * 1. Fault while reloading DS, ES, FS or GS 1197905a36a2SIngo Molnar * 2. Fault while executing IRET 1198905a36a2SIngo Molnar * Category 1 we do not need to fix up as Xen has already reloaded all segment 1199905a36a2SIngo Molnar * registers that could be reloaded and zeroed the others. 1200905a36a2SIngo Molnar * Category 2 we fix up by killing the current process. We cannot use the 1201905a36a2SIngo Molnar * normal Linux return path in this case because if we use the IRET hypercall 1202905a36a2SIngo Molnar * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 1203905a36a2SIngo Molnar * We distinguish between categories by comparing each saved segment register 1204905a36a2SIngo Molnar * with its current contents: any discrepancy means we are in category 1. 1205905a36a2SIngo Molnar */ 1206905a36a2SIngo MolnarENTRY(xen_failsafe_callback) 12078c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 1208905a36a2SIngo Molnar movl %ds, %ecx 1209905a36a2SIngo Molnar cmpw %cx, 0x10(%rsp) 1210905a36a2SIngo Molnar jne 1f 1211905a36a2SIngo Molnar movl %es, %ecx 1212905a36a2SIngo Molnar cmpw %cx, 0x18(%rsp) 1213905a36a2SIngo Molnar jne 1f 1214905a36a2SIngo Molnar movl %fs, %ecx 1215905a36a2SIngo Molnar cmpw %cx, 0x20(%rsp) 1216905a36a2SIngo Molnar jne 1f 1217905a36a2SIngo Molnar movl %gs, %ecx 1218905a36a2SIngo Molnar cmpw %cx, 0x28(%rsp) 1219905a36a2SIngo Molnar jne 1f 1220905a36a2SIngo Molnar /* All segments match their saved values => Category 2 (Bad IRET). */ 1221905a36a2SIngo Molnar movq (%rsp), %rcx 1222905a36a2SIngo Molnar movq 8(%rsp), %r11 1223905a36a2SIngo Molnar addq $0x30, %rsp 1224905a36a2SIngo Molnar pushq $0 /* RIP */ 12258c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 1226905a36a2SIngo Molnar jmp general_protection 1227905a36a2SIngo Molnar1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET.
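	 * We retry by rebuilding a normal pt_regs frame below and
	 * leaving through error_exit, which attempts the IRET again
	 * with the segment registers Xen has already reloaded.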
*/ 1228905a36a2SIngo Molnar movq (%rsp), %rcx 1229905a36a2SIngo Molnar movq 8(%rsp), %r11 1230905a36a2SIngo Molnar addq $0x30, %rsp 12318c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1232905a36a2SIngo Molnar pushq $-1 /* orig_ax = -1 => not a system call */ 1233905a36a2SIngo Molnar ALLOC_PT_GPREGS_ON_STACK 1234905a36a2SIngo Molnar SAVE_C_REGS 1235905a36a2SIngo Molnar SAVE_EXTRA_REGS 1236946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 1237905a36a2SIngo Molnar jmp error_exit 1238905a36a2SIngo MolnarEND(xen_failsafe_callback) 1239905a36a2SIngo Molnar 1240905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1241905a36a2SIngo Molnar xen_hvm_callback_vector xen_evtchn_do_upcall 1242905a36a2SIngo Molnar 1243905a36a2SIngo Molnar#endif /* CONFIG_XEN */ 1244905a36a2SIngo Molnar 1245905a36a2SIngo Molnar#if IS_ENABLED(CONFIG_HYPERV) 1246905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1247905a36a2SIngo Molnar hyperv_callback_vector hyperv_vector_handler 1248*93286261SVitaly Kuznetsov 1249*93286261SVitaly Kuznetsovapicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \ 1250*93286261SVitaly Kuznetsov hyperv_reenlightenment_vector hyperv_reenlightenment_intr 1251905a36a2SIngo Molnar#endif /* CONFIG_HYPERV */ 1252905a36a2SIngo Molnar 1253905a36a2SIngo Molnaridtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1254905a36a2SIngo Molnaridtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1255905a36a2SIngo Molnaridtentry stack_segment do_stack_segment has_error_code=1 12564d732138SIngo Molnar 1257905a36a2SIngo Molnar#ifdef CONFIG_XEN 125843e41110SJuergen Grossidtentry xennmi do_nmi has_error_code=0 12595878d5d6SJuergen Grossidtentry xendebug do_debug has_error_code=0 12605878d5d6SJuergen Grossidtentry xenint3 do_int3 has_error_code=0 1261905a36a2SIngo Molnar#endif 12624d732138SIngo Molnar 1263905a36a2SIngo Molnaridtentry general_protection do_general_protection has_error_code=1 126411a7ffb0SThomas Gleixneridtentry page_fault do_page_fault has_error_code=1 12654d732138SIngo Molnar 1266905a36a2SIngo Molnar#ifdef CONFIG_KVM_GUEST 1267905a36a2SIngo Molnaridtentry async_page_fault do_async_page_fault has_error_code=1 1268905a36a2SIngo Molnar#endif 12694d732138SIngo Molnar 1270905a36a2SIngo Molnar#ifdef CONFIG_X86_MCE 12716f41c34dSThomas Gleixneridtentry machine_check do_mce has_error_code=0 paranoid=1 1272905a36a2SIngo Molnar#endif 1273905a36a2SIngo Molnar 1274905a36a2SIngo Molnar/* 1275905a36a2SIngo Molnar * Save all registers in pt_regs, and switch gs if needed. 1276905a36a2SIngo Molnar * Use slow, but surefire "are we in kernel?" check. 
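 *
 * The check reads MSR_GS_BASE and tests its sign bit: kernel gsbase
 * values live in the upper (negative) half of the canonical address
 * space, so a non-negative value means GS still holds the user's base.
 * As a C-level sketch (illustrative only; rdmsr() is a stand-in, not a
 * real helper):
 *
 *	ebx = 1;
 *	if ((s64)rdmsr(MSR_GS_BASE) >= 0) {
 *		swapgs();		/* load kernel gsbase */
 *		ebx = 0;		/* tell paranoid_exit to swap back */
 *	}
 *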
1277905a36a2SIngo Molnar * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1278905a36a2SIngo Molnar */ 1279905a36a2SIngo MolnarENTRY(paranoid_entry) 12808c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 1281905a36a2SIngo Molnar cld 1282905a36a2SIngo Molnar SAVE_C_REGS 8 1283905a36a2SIngo Molnar SAVE_EXTRA_REGS 8 1284946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 8 1285905a36a2SIngo Molnar movl $1, %ebx 1286905a36a2SIngo Molnar movl $MSR_GS_BASE, %ecx 1287905a36a2SIngo Molnar rdmsr 1288905a36a2SIngo Molnar testl %edx, %edx 1289905a36a2SIngo Molnar js 1f /* negative -> in kernel */ 1290905a36a2SIngo Molnar SWAPGS 1291905a36a2SIngo Molnar xorl %ebx, %ebx 12928a09317bSDave Hansen 12938a09317bSDave Hansen1: 12948a09317bSDave Hansen SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 12958a09317bSDave Hansen 12968a09317bSDave Hansen ret 1297905a36a2SIngo MolnarEND(paranoid_entry) 1298905a36a2SIngo Molnar 1299905a36a2SIngo Molnar/* 1300905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. This is invoked 1301905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came 1302905a36a2SIngo Molnar * from kernel space. 1303905a36a2SIngo Molnar * 1304905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early 1305905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would 1306905a36a2SIngo Molnar * be complicated. Fortunately, there's no good reason 1307905a36a2SIngo Molnar * to try to handle preemption here. 13084d732138SIngo Molnar * 13094d732138SIngo Molnar * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) 1310905a36a2SIngo Molnar */ 1311905a36a2SIngo MolnarENTRY(paranoid_exit) 13128c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 13132140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY) 1314905a36a2SIngo Molnar TRACE_IRQS_OFF_DEBUG 1315905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */ 1316e5317832SAndy Lutomirski jnz .Lparanoid_exit_no_swapgs 1317905a36a2SIngo Molnar TRACE_IRQS_IRETQ 131821e94459SPeter Zijlstra RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 1319905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK 1320e5317832SAndy Lutomirski jmp .Lparanoid_exit_restore 1321e5317832SAndy Lutomirski.Lparanoid_exit_no_swapgs: 1322905a36a2SIngo Molnar TRACE_IRQS_IRETQ_DEBUG 1323e5317832SAndy Lutomirski.Lparanoid_exit_restore: 1324e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel 1325905a36a2SIngo MolnarEND(paranoid_exit) 1326905a36a2SIngo Molnar 1327905a36a2SIngo Molnar/* 1328905a36a2SIngo Molnar * Save all registers in pt_regs, and switch gs if needed. 1329539f5113SAndy Lutomirski * Return: EBX=0: came from user mode; EBX=1: otherwise 1330905a36a2SIngo Molnar */ 1331905a36a2SIngo MolnarENTRY(error_entry) 13328c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 1333905a36a2SIngo Molnar cld 1334905a36a2SIngo Molnar SAVE_C_REGS 8 1335905a36a2SIngo Molnar SAVE_EXTRA_REGS 8 1336946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 8 1337905a36a2SIngo Molnar xorl %ebx, %ebx 1338905a36a2SIngo Molnar testb $3, CS+8(%rsp) 1339cb6f64edSAndy Lutomirski jz .Lerror_kernelspace 1340539f5113SAndy Lutomirski 1341cb6f64edSAndy Lutomirski /* 1342cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered 1343cb6f64edSAndy Lutomirski * from user mode due to an IRET fault. 1344cb6f64edSAndy Lutomirski */ 1345905a36a2SIngo Molnar SWAPGS 13468a09317bSDave Hansen /* We have user CR3. Change to kernel CR3.
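	 * %rax is free to use as the scratch register here: the GP
	 * registers were already saved into pt_regs above.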
*/ 13478a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1348539f5113SAndy Lutomirski 1349cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs: 13507f2590a1SAndy Lutomirski /* Put us onto the real thread stack. */ 13517f2590a1SAndy Lutomirski popq %r12 /* save return addr in %r12 */ 13527f2590a1SAndy Lutomirski movq %rsp, %rdi /* arg0 = pt_regs pointer */ 13537f2590a1SAndy Lutomirski call sync_regs 13547f2590a1SAndy Lutomirski movq %rax, %rsp /* switch stack */ 13557f2590a1SAndy Lutomirski ENCODE_FRAME_POINTER 13567f2590a1SAndy Lutomirski pushq %r12 13577f2590a1SAndy Lutomirski 1358f1075053SAndy Lutomirski /* 1359f1075053SAndy Lutomirski * We need to tell lockdep that IRQs are off. We can't do this until 1360f1075053SAndy Lutomirski * we fix gsbase, and we should do it before enter_from_user_mode 1361f1075053SAndy Lutomirski * (which can take locks). 1362f1075053SAndy Lutomirski */ 1363f1075053SAndy Lutomirski TRACE_IRQS_OFF 1364478dc89cSAndy Lutomirski CALL_enter_from_user_mode 1365f1075053SAndy Lutomirski ret 136602bc7768SAndy Lutomirski 1367cb6f64edSAndy Lutomirski.Lerror_entry_done: 1368905a36a2SIngo Molnar TRACE_IRQS_OFF 1369905a36a2SIngo Molnar ret 1370905a36a2SIngo Molnar 1371905a36a2SIngo Molnar /* 1372905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with 1373905a36a2SIngo Molnar * usergs. Handle them here. B stepping K8s sometimes report a 1374905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check 1375905a36a2SIngo Molnar * for these here too. 1376905a36a2SIngo Molnar */ 1377cb6f64edSAndy Lutomirski.Lerror_kernelspace: 1378905a36a2SIngo Molnar incl %ebx 1379905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx 1380905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp) 1381cb6f64edSAndy Lutomirski je .Lerror_bad_iret 1382905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */ 1383905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp) 1384cb6f64edSAndy Lutomirski je .Lbstep_iret 138542c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp) 1386cb6f64edSAndy Lutomirski jne .Lerror_entry_done 1387539f5113SAndy Lutomirski 1388539f5113SAndy Lutomirski /* 138942c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 1390539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in 139142c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase. 1392539f5113SAndy Lutomirski */ 13932fa5f04fSWanpeng Li SWAPGS 13948a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 13952fa5f04fSWanpeng Li jmp .Lerror_entry_done 1396905a36a2SIngo Molnar 1397cb6f64edSAndy Lutomirski.Lbstep_iret: 1398905a36a2SIngo Molnar /* Fix truncated RIP */ 1399905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp) 1400905a36a2SIngo Molnar /* fall through */ 1401905a36a2SIngo Molnar 1402cb6f64edSAndy Lutomirski.Lerror_bad_iret: 1403539f5113SAndy Lutomirski /* 14048a09317bSDave Hansen * We came from an IRET to user mode, so we have user 14058a09317bSDave Hansen * gsbase and CR3. Switch to kernel gsbase and CR3: 1406539f5113SAndy Lutomirski */ 1407905a36a2SIngo Molnar SWAPGS 14088a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1409539f5113SAndy Lutomirski 1410539f5113SAndy Lutomirski /* 1411539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs 1412539f5113SAndy Lutomirski * as if we faulted immediately after IRET and clear EBX so that 1413539f5113SAndy Lutomirski * error_exit knows that we will be returning to user mode.
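	 * In rough C terms (a sketch, not the exact calling convention):
	 *
	 *	regs = fixup_bad_iret(regs);	/* frame moved to the
	 *					   thread stack */
	 *	ebx = 0;			/* "came from user mode" */
	 *	goto error_entry_from_usermode_after_swapgs;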
1414539f5113SAndy Lutomirski */ 1415905a36a2SIngo Molnar mov %rsp, %rdi 1416905a36a2SIngo Molnar call fixup_bad_iret 1417905a36a2SIngo Molnar mov %rax, %rsp 1418539f5113SAndy Lutomirski decl %ebx 1419cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs 1420905a36a2SIngo MolnarEND(error_entry) 1421905a36a2SIngo Molnar 1422905a36a2SIngo Molnar 1423539f5113SAndy Lutomirski/* 142475ca5b22SNicolas Iooss * On entry, EBX is a "return to kernel mode" flag: 1425539f5113SAndy Lutomirski * 1: already in kernel mode, don't need SWAPGS 1426539f5113SAndy Lutomirski * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode 1427539f5113SAndy Lutomirski */ 1428905a36a2SIngo MolnarENTRY(error_exit) 14298c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 14302140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY) 1431905a36a2SIngo Molnar TRACE_IRQS_OFF 14322140a994SJan Beulich testl %ebx, %ebx 1433905a36a2SIngo Molnar jnz retint_kernel 1434905a36a2SIngo Molnar jmp retint_user 1435905a36a2SIngo MolnarEND(error_exit) 1436905a36a2SIngo Molnar 1437929bacecSAndy Lutomirski/* 1438929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all, 1439929bacecSAndy Lutomirski * so we can use real assembly here. 14408a09317bSDave Hansen * 14418a09317bSDave Hansen * Registers: 14428a09317bSDave Hansen * %r14: Used to save/restore the CR3 of the interrupted context 14438a09317bSDave Hansen * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 1444929bacecSAndy Lutomirski */ 1445905a36a2SIngo MolnarENTRY(nmi) 14468c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1447929bacecSAndy Lutomirski 1448fc57a7c6SAndy Lutomirski /* 1449905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then 1450905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context. 1451905a36a2SIngo Molnar * This means that we can have nested NMIs where the next 1452905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We 1453905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the 1454905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant 1455905a36a2SIngo Molnar * anyway. 1456905a36a2SIngo Molnar * 1457905a36a2SIngo Molnar * To handle this case we do the following: 1458905a36a2SIngo Molnar * Check the a special location on the stack that contains 1459905a36a2SIngo Molnar * a variable that is set when NMIs are executing. 1460905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it 1461905a36a2SIngo Molnar * is an NMI stack. 
1462905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI 1463905a36a2SIngo Molnar * stack then: 1464905a36a2SIngo Molnar * o Set the special variable on the stack 14650b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the 14660b22930eSAndy Lutomirski * stack 14670b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack 1468905a36a2SIngo Molnar * o Continue processing the NMI 1469905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack: 14700b22930eSAndy Lutomirski * o Modify the "iret" location to jump to repeat_nmi 1471905a36a2SIngo Molnar * o Return back to the first NMI 1472905a36a2SIngo Molnar * 1473905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable. 1474905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that they are 1475905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was 1476905a36a2SIngo Molnar * a nested NMI that updated the copied interrupt stack frame, a 1477905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second 1478905a36a2SIngo Molnar * NMI. 14799b6e6a83SAndy Lutomirski * 14809b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace 14819b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode 14829b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like 14839b6e6a83SAndy Lutomirski * other IST entries. 1484905a36a2SIngo Molnar */ 1485905a36a2SIngo Molnar 1486e93c1730SAndy Lutomirski ASM_CLAC 1487e93c1730SAndy Lutomirski 1488905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */ 1489905a36a2SIngo Molnar pushq %rdx 1490905a36a2SIngo Molnar 14919b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp) 14929b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel 1493905a36a2SIngo Molnar 1494905a36a2SIngo Molnar /* 14959b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we 14969b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and 14979b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end 14989b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs 14999b6e6a83SAndy Lutomirski * are off. 150083c133cfSAndy Lutomirski * 150183c133cfSAndy Lutomirski * We also must not push anything to the stack before switching 150283c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable.
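 *
 * The code below therefore open-codes what idtentry would otherwise do
 * for us: switch gsbase and CR3, move to the thread stack, and rebuild
 * the full pt_regs layout by hand before calling do_nmi.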
15039b6e6a83SAndy Lutomirski */ 15049b6e6a83SAndy Lutomirski 1505929bacecSAndy Lutomirski swapgs 15069b6e6a83SAndy Lutomirski cld 15078a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 15089b6e6a83SAndy Lutomirski movq %rsp, %rdx 15099b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 15108c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8 15119b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */ 15129b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */ 15139b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */ 15149b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */ 15159b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */ 15168c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 15179b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */ 15189b6e6a83SAndy Lutomirski pushq %rdi /* pt_regs->di */ 15199b6e6a83SAndy Lutomirski pushq %rsi /* pt_regs->si */ 15209b6e6a83SAndy Lutomirski pushq (%rdx) /* pt_regs->dx */ 15219b6e6a83SAndy Lutomirski pushq %rcx /* pt_regs->cx */ 15229b6e6a83SAndy Lutomirski pushq %rax /* pt_regs->ax */ 15239b6e6a83SAndy Lutomirski pushq %r8 /* pt_regs->r8 */ 15249b6e6a83SAndy Lutomirski pushq %r9 /* pt_regs->r9 */ 15259b6e6a83SAndy Lutomirski pushq %r10 /* pt_regs->r10 */ 15269b6e6a83SAndy Lutomirski pushq %r11 /* pt_regs->r11 */ 15279b6e6a83SAndy Lutomirski pushq %rbx /* pt_regs->rbx */ 15289b6e6a83SAndy Lutomirski pushq %rbp /* pt_regs->rbp */ 15299b6e6a83SAndy Lutomirski pushq %r12 /* pt_regs->r12 */ 15309b6e6a83SAndy Lutomirski pushq %r13 /* pt_regs->r13 */ 15319b6e6a83SAndy Lutomirski pushq %r14 /* pt_regs->r14 */ 15329b6e6a83SAndy Lutomirski pushq %r15 /* pt_regs->r15 */ 15338c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 1534946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 15359b6e6a83SAndy Lutomirski 15369b6e6a83SAndy Lutomirski /* 15379b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage 15389b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're 15399b6e6a83SAndy Lutomirski * done with the NMI stack. 15409b6e6a83SAndy Lutomirski */ 15419b6e6a83SAndy Lutomirski 15429b6e6a83SAndy Lutomirski movq %rsp, %rdi 15439b6e6a83SAndy Lutomirski movq $-1, %rsi 15449b6e6a83SAndy Lutomirski call do_nmi 15459b6e6a83SAndy Lutomirski 15469b6e6a83SAndy Lutomirski /* 15479b6e6a83SAndy Lutomirski * Return back to user mode. We must *not* do the normal exit 1548946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts. 
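	 * swapgs_restore_regs_and_return_to_usermode restores the saved
	 * registers, switches back to the user gsbase and CR3, and
	 * IRETs, all without ever enabling interrupts.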
15499b6e6a83SAndy Lutomirski */ 15508a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode 15519b6e6a83SAndy Lutomirski 15529b6e6a83SAndy Lutomirski.Lnmi_from_kernel: 15539b6e6a83SAndy Lutomirski /* 15540b22930eSAndy Lutomirski * Here's what our stack frame will look like: 15550b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15560b22930eSAndy Lutomirski * | original SS | 15570b22930eSAndy Lutomirski * | original Return RSP | 15580b22930eSAndy Lutomirski * | original RFLAGS | 15590b22930eSAndy Lutomirski * | original CS | 15600b22930eSAndy Lutomirski * | original RIP | 15610b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15620b22930eSAndy Lutomirski * | temp storage for rdx | 15630b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15640b22930eSAndy Lutomirski * | "NMI executing" variable | 15650b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15660b22930eSAndy Lutomirski * | iret SS } Copied from "outermost" frame | 15670b22930eSAndy Lutomirski * | iret Return RSP } on each loop iteration; overwritten | 15680b22930eSAndy Lutomirski * | iret RFLAGS } by a nested NMI to force another | 15690b22930eSAndy Lutomirski * | iret CS } iteration if needed. | 15700b22930eSAndy Lutomirski * | iret RIP } | 15710b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15720b22930eSAndy Lutomirski * | outermost SS } initialized in first_nmi; | 15730b22930eSAndy Lutomirski * | outermost Return RSP } will not be changed before | 15740b22930eSAndy Lutomirski * | outermost RFLAGS } NMI processing is done. | 15750b22930eSAndy Lutomirski * | outermost CS } Copied to "iret" frame on each | 15760b22930eSAndy Lutomirski * | outermost RIP } iteration. | 15770b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15780b22930eSAndy Lutomirski * | pt_regs | 15790b22930eSAndy Lutomirski * +---------------------------------------------------------+ 15800b22930eSAndy Lutomirski * 15810b22930eSAndy Lutomirski * The "original" frame is used by hardware. Before re-enabling 15820b22930eSAndy Lutomirski * NMIs, we need to be done with it, and we need to leave enough 15830b22930eSAndy Lutomirski * space for the asm code here. 15840b22930eSAndy Lutomirski * 15850b22930eSAndy Lutomirski * We return by executing IRET while RSP points to the "iret" frame. 15860b22930eSAndy Lutomirski * That will either return for real or it will loop back into NMI 15870b22930eSAndy Lutomirski * processing. 15880b22930eSAndy Lutomirski * 15890b22930eSAndy Lutomirski * The "outermost" frame is copied to the "iret" frame on each 15900b22930eSAndy Lutomirski * iteration of the loop, so each iteration starts with the "iret" 15910b22930eSAndy Lutomirski * frame pointing to the final return target. 15920b22930eSAndy Lutomirski */ 15930b22930eSAndy Lutomirski 15940b22930eSAndy Lutomirski /* 15950b22930eSAndy Lutomirski * Determine whether we're a nested NMI. 15960b22930eSAndy Lutomirski * 1597a27507caSAndy Lutomirski * If we interrupted kernel code between repeat_nmi and 1598a27507caSAndy Lutomirski * end_repeat_nmi, then we are a nested NMI. We must not 1599a27507caSAndy Lutomirski * modify the "iret" frame because it's being written by 1600a27507caSAndy Lutomirski * the outer NMI. 
That's okay; the outer NMI handler is 1601a27507caSAndy Lutomirski * about to call do_nmi anyway, so we can just 1602a27507caSAndy Lutomirski * resume the outer NMI. 1603a27507caSAndy Lutomirski */ 1604a27507caSAndy Lutomirski 1605a27507caSAndy Lutomirski movq $repeat_nmi, %rdx 1606a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx 1607a27507caSAndy Lutomirski ja 1f 1608a27507caSAndy Lutomirski movq $end_repeat_nmi, %rdx 1609a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx 1610a27507caSAndy Lutomirski ja nested_nmi_out 1611a27507caSAndy Lutomirski1: 1612a27507caSAndy Lutomirski 1613a27507caSAndy Lutomirski /* 1614a27507caSAndy Lutomirski * Now check "NMI executing". If it's set, then we're nested. 16150b22930eSAndy Lutomirski * This will not detect if we interrupted an outer NMI just 16160b22930eSAndy Lutomirski * before IRET. 1617905a36a2SIngo Molnar */ 1618905a36a2SIngo Molnar cmpl $1, -8(%rsp) 1619905a36a2SIngo Molnar je nested_nmi 1620905a36a2SIngo Molnar 1621905a36a2SIngo Molnar /* 16220b22930eSAndy Lutomirski * Now test if the previous stack was an NMI stack. This covers 16230b22930eSAndy Lutomirski * the case where we interrupt an outer NMI after it clears 1624810bc075SAndy Lutomirski * "NMI executing" but before IRET. We need to be careful, though: 1625810bc075SAndy Lutomirski * there is one case in which RSP could point to the NMI stack 1626810bc075SAndy Lutomirski * despite there being no NMI active: naughty userspace controls 1627810bc075SAndy Lutomirski * RSP at the very beginning of the SYSCALL targets. We can 1628810bc075SAndy Lutomirski * pull a fast one on naughty userspace, though: we program 1629810bc075SAndy Lutomirski * SYSCALL to mask DF, so userspace cannot cause DF to be set 1630810bc075SAndy Lutomirski * if it controls the kernel's RSP. We set DF before we clear 1631810bc075SAndy Lutomirski * "NMI executing". 1632905a36a2SIngo Molnar */ 1633905a36a2SIngo Molnar lea 6*8(%rsp), %rdx 1634905a36a2SIngo Molnar /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1635905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1636905a36a2SIngo Molnar /* If the stack pointer is above the NMI stack, this is a normal NMI */ 1637905a36a2SIngo Molnar ja first_nmi 16384d732138SIngo Molnar 1639905a36a2SIngo Molnar subq $EXCEPTION_STKSZ, %rdx 1640905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1641905a36a2SIngo Molnar /* If it is below the NMI stack, it is a normal NMI */ 1642905a36a2SIngo Molnar jb first_nmi 1643810bc075SAndy Lutomirski 1644810bc075SAndy Lutomirski /* Ah, it is within the NMI stack. */ 1645810bc075SAndy Lutomirski 1646810bc075SAndy Lutomirski testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) 1647810bc075SAndy Lutomirski jz first_nmi /* RSP was user controlled. */ 1648810bc075SAndy Lutomirski 1649810bc075SAndy Lutomirski /* This is a nested NMI. */ 1650905a36a2SIngo Molnar 1651905a36a2SIngo Molnarnested_nmi: 1652905a36a2SIngo Molnar /* 16530b22930eSAndy Lutomirski * Modify the "iret" frame to point to repeat_nmi, forcing another 16540b22930eSAndy Lutomirski * iteration of NMI handling.
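	 * The replacement "iret" frame written below is, in effect:
	 *
	 *	SS	= __KERNEL_DS
	 *	RSP	= scratch space below the copied frames
	 *	RFLAGS	= current flags
	 *	CS	= __KERNEL_CS
	 *	RIP	= repeat_nmi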
1655905a36a2SIngo Molnar */ 165623a781e9SAndy Lutomirski subq $8, %rsp 1657905a36a2SIngo Molnar leaq -10*8(%rsp), %rdx 1658905a36a2SIngo Molnar pushq $__KERNEL_DS 1659905a36a2SIngo Molnar pushq %rdx 1660905a36a2SIngo Molnar pushfq 1661905a36a2SIngo Molnar pushq $__KERNEL_CS 1662905a36a2SIngo Molnar pushq $repeat_nmi 1663905a36a2SIngo Molnar 1664905a36a2SIngo Molnar /* Put stack back */ 1665905a36a2SIngo Molnar addq $(6*8), %rsp 1666905a36a2SIngo Molnar 1667905a36a2SIngo Molnarnested_nmi_out: 1668905a36a2SIngo Molnar popq %rdx 1669905a36a2SIngo Molnar 16700b22930eSAndy Lutomirski /* We are returning to kernel mode, so this cannot result in a fault. */ 1671929bacecSAndy Lutomirski iretq 1672905a36a2SIngo Molnar 1673905a36a2SIngo Molnarfirst_nmi: 16740b22930eSAndy Lutomirski /* Restore rdx. */ 1675905a36a2SIngo Molnar movq (%rsp), %rdx 1676905a36a2SIngo Molnar 167736f1a77bSAndy Lutomirski /* Make room for "NMI executing". */ 167836f1a77bSAndy Lutomirski pushq $0 1679905a36a2SIngo Molnar 16800b22930eSAndy Lutomirski /* Leave room for the "iret" frame */ 1681905a36a2SIngo Molnar subq $(5*8), %rsp 1682905a36a2SIngo Molnar 16830b22930eSAndy Lutomirski /* Copy the "original" frame to the "outermost" frame */ 1684905a36a2SIngo Molnar .rept 5 1685905a36a2SIngo Molnar pushq 11*8(%rsp) 1686905a36a2SIngo Molnar .endr 16878c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1688905a36a2SIngo Molnar 1689905a36a2SIngo Molnar /* Everything up to here is safe from nested NMIs */ 1690905a36a2SIngo Molnar 1691a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 1692a97439aaSAndy Lutomirski /* 1693a97439aaSAndy Lutomirski * For ease of testing, unmask NMIs right away. Disabled by 1694a97439aaSAndy Lutomirski * default because IRET is very expensive. 1695a97439aaSAndy Lutomirski */ 1696a97439aaSAndy Lutomirski pushq $0 /* SS */ 1697a97439aaSAndy Lutomirski pushq %rsp /* RSP (minus 8 because of the previous push) */ 1698a97439aaSAndy Lutomirski addq $8, (%rsp) /* Fix up RSP */ 1699a97439aaSAndy Lutomirski pushfq /* RFLAGS */ 1700a97439aaSAndy Lutomirski pushq $__KERNEL_CS /* CS */ 1701a97439aaSAndy Lutomirski pushq $1f /* RIP */ 1702929bacecSAndy Lutomirski iretq /* continues at repeat_nmi below */ 17038c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1704a97439aaSAndy Lutomirski1: 1705a97439aaSAndy Lutomirski#endif 1706a97439aaSAndy Lutomirski 17070b22930eSAndy Lutomirskirepeat_nmi: 1708905a36a2SIngo Molnar /* 1709905a36a2SIngo Molnar * If there was a nested NMI, the first NMI's iret will return 1710905a36a2SIngo Molnar * here. But NMIs are still enabled and we can take another 1711905a36a2SIngo Molnar * nested NMI. The nested NMI checks the interrupted RIP to see 1712905a36a2SIngo Molnar * if it is between repeat_nmi and end_repeat_nmi, and if so 1713905a36a2SIngo Molnar * it will just return, as we are about to repeat an NMI anyway. 1714905a36a2SIngo Molnar * This makes it safe to copy to the stack frame that a nested 1715905a36a2SIngo Molnar * NMI will update. 17160b22930eSAndy Lutomirski * 17170b22930eSAndy Lutomirski * RSP is pointing to "outermost RIP". gsbase is unknown, but, if 17180b22930eSAndy Lutomirski * we're repeating an NMI, gsbase has the same value that it had on 17190b22930eSAndy Lutomirski * the first iteration. paranoid_entry will load the kernel 172036f1a77bSAndy Lutomirski * gsbase if needed before we call do_nmi. "NMI executing" 172136f1a77bSAndy Lutomirski * is zero. 1722905a36a2SIngo Molnar */ 172336f1a77bSAndy Lutomirski movq $1, 10*8(%rsp) /* Set "NMI executing". 
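	 * (RSP points at "outermost RIP" here, so the variable sits 10
	 * words up the stack: above the five-word "outermost" frame and
	 * the five-word "iret" frame.)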
*/ 1724905a36a2SIngo Molnar 17250b22930eSAndy Lutomirski /* 17260b22930eSAndy Lutomirski * Copy the "outermost" frame to the "iret" frame. NMIs that nest 17270b22930eSAndy Lutomirski * here must not modify the "iret" frame while we're writing to 17280b22930eSAndy Lutomirski * it or it will end up containing garbage. 17290b22930eSAndy Lutomirski */ 1730905a36a2SIngo Molnar addq $(10*8), %rsp 1731905a36a2SIngo Molnar .rept 5 1732905a36a2SIngo Molnar pushq -6*8(%rsp) 1733905a36a2SIngo Molnar .endr 1734905a36a2SIngo Molnar subq $(5*8), %rsp 1735905a36a2SIngo Molnarend_repeat_nmi: 1736905a36a2SIngo Molnar 1737905a36a2SIngo Molnar /* 17380b22930eSAndy Lutomirski * Everything below this point can be preempted by a nested NMI. 17390b22930eSAndy Lutomirski * If this happens, then the inner NMI will change the "iret" 17400b22930eSAndy Lutomirski * frame to point back to repeat_nmi. 1741905a36a2SIngo Molnar */ 1742905a36a2SIngo Molnar pushq $-1 /* ORIG_RAX: no syscall to restart */ 1743905a36a2SIngo Molnar ALLOC_PT_GPREGS_ON_STACK 1744905a36a2SIngo Molnar 1745905a36a2SIngo Molnar /* 1746905a36a2SIngo Molnar * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit 1747905a36a2SIngo Molnar * as we should not be calling schedule in NMI context, 1748905a36a2SIngo Molnar * even with normal interrupts enabled. An NMI should not be 1749905a36a2SIngo Molnar * setting NEED_RESCHED or anything that normal interrupts and 1750905a36a2SIngo Molnar * exceptions might do. 1751905a36a2SIngo Molnar */ 1752905a36a2SIngo Molnar call paranoid_entry 17538c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 1754905a36a2SIngo Molnar 1755905a36a2SIngo Molnar /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ 1756905a36a2SIngo Molnar movq %rsp, %rdi 1757905a36a2SIngo Molnar movq $-1, %rsi 1758905a36a2SIngo Molnar call do_nmi 1759905a36a2SIngo Molnar 176021e94459SPeter Zijlstra RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 17618a09317bSDave Hansen 1762905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */ 1763905a36a2SIngo Molnar jnz nmi_restore 1764905a36a2SIngo Molnarnmi_swapgs: 1765905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK 1766905a36a2SIngo Molnarnmi_restore: 1767471ee483SAndy Lutomirski POP_EXTRA_REGS 1768471ee483SAndy Lutomirski POP_C_REGS 17690b22930eSAndy Lutomirski 1770471ee483SAndy Lutomirski /* 1771471ee483SAndy Lutomirski * Skip orig_ax and the "outermost" frame to point RSP at the "iret" 1772471ee483SAndy Lutomirski * frame. 1773471ee483SAndy Lutomirski */ 1774471ee483SAndy Lutomirski addq $6*8, %rsp 1775905a36a2SIngo Molnar 1776810bc075SAndy Lutomirski /* 1777810bc075SAndy Lutomirski * Clear "NMI executing". Set DF first so that we can easily 1778810bc075SAndy Lutomirski * distinguish the remaining code between here and IRET from 1779929bacecSAndy Lutomirski * the SYSCALL entry and exit paths. 1780929bacecSAndy Lutomirski * 1781929bacecSAndy Lutomirski * We arguably should just inspect RIP instead, but I (Andy) wrote 1782929bacecSAndy Lutomirski * this code when I had the misapprehension that Xen PV supported 1783929bacecSAndy Lutomirski * NMIs, and Xen PV would break that approach. 1784810bc075SAndy Lutomirski */ 1785810bc075SAndy Lutomirski std 1786810bc075SAndy Lutomirski movq $0, 5*8(%rsp) /* clear "NMI executing" */ 17870b22930eSAndy Lutomirski 17880b22930eSAndy Lutomirski /* 1789929bacecSAndy Lutomirski * iretq reads the "iret" frame and exits the NMI stack in a 1790929bacecSAndy Lutomirski * single instruction.
We are returning to kernel mode, so this 1791929bacecSAndy Lutomirski * cannot result in a fault. Similarly, we don't need to worry 1792929bacecSAndy Lutomirski * about espfix64 on the way back to kernel mode. 17930b22930eSAndy Lutomirski */ 1794929bacecSAndy Lutomirski iretq 1795905a36a2SIngo MolnarEND(nmi) 1796905a36a2SIngo Molnar 1797905a36a2SIngo MolnarENTRY(ignore_sysret) 17988c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 1799905a36a2SIngo Molnar mov $-ENOSYS, %eax 1800905a36a2SIngo Molnar sysret 1801905a36a2SIngo MolnarEND(ignore_sysret) 18022deb4be2SAndy Lutomirski 18032deb4be2SAndy LutomirskiENTRY(rewind_stack_do_exit) 18048c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 18052deb4be2SAndy Lutomirski /* Prevent any naive code from trying to unwind to our caller. */ 18062deb4be2SAndy Lutomirski xorl %ebp, %ebp 18072deb4be2SAndy Lutomirski 18082deb4be2SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rax 18098c1f7558SJosh Poimboeuf leaq -PTREGS_SIZE(%rax), %rsp 18108c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE 18112deb4be2SAndy Lutomirski 18122deb4be2SAndy Lutomirski call do_exit 18132deb4be2SAndy LutomirskiEND(rewind_stack_do_exit) 1814