/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm
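
/*
 * For reference: bit 9 of RFLAGS is IF (X86_EFLAGS_IF), so the
 * "bt $9, \flags" above simply asks "were interrupts enabled in the
 * saved flags?"; TRACE_IRQS_ON is skipped when they were off.
 */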

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints.  During this
 * window, if lockdep is enabled, it might jump back into the debug
 * handler outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET.  That is
 * because it deals with uncanonical addresses better.  SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
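
/*
 * For illustration only (not part of the kernel): a minimal userspace
 * sequence that arrives here, assuming the standard x86-64 Linux ABI
 * and hypothetical msg/len symbols:
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buf
 *	movq	$len, %rdx		# arg2: count
 *	syscall				# rcx <- rip, r11 <- rflags
 */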

	.pushsection .entry_trampoline, "ax"

/*
 * The code in here gets remapped into cpu_entry_area's trampoline.  This
 * means that the assembler and linker have the wrong idea as to where this
 * code lives (and, in fact, it's mapped more than once, so it's not even at
 * a fixed address).  So we can't reference any symbols outside the entry
 * trampoline and expect it to work.
 *
 * Instead, we carefully abuse %rip-relative addressing.
 * _entry_trampoline(%rip) refers to the start of the remapped entry
 * trampoline.  We can thus find cpu_entry_area with this macro:
 */

#define CPU_ENTRY_AREA \
	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)

/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
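
/*
 * A sketch of why this works: "sym - off(%rip)" assembles to a
 * %rip-relative reference whose effective address is &sym - off, and
 * %rip-relative displacements are fixed relative to the instruction.
 * When this code runs from its trampoline mapping, the reference
 * therefore resolves to the *remapped* _entry_trampoline; subtracting
 * its offset within struct cpu_entry_area yields the base of the
 * current CPU's cpu_entry_area, from which RSP_SCRATCH and the TSS are
 * then reached by constant offsets.
 */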

ENTRY(entry_SYSCALL_64_trampoline)
	UNWIND_HINT_EMPTY
	swapgs

	/* Stash the user RSP. */
	movq	%rsp, RSP_SCRATCH

	/* Note: using %rsp as a scratch reg. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load the top of the task stack into RSP */
	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp

	/* Start building the simulated IRET frame. */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	RSP_SCRATCH			/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
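
	/*
	 * The simulated IRET frame now on the task stack, from highest
	 * address to lowest:
	 *
	 *	SS	= __USER_DS
	 *	RSP	= user RSP (from RSP_SCRATCH)
	 *	RFLAGS	= r11 (as saved by SYSCALL)
	 *	CS	= __USER_CS
	 *	RIP	= rcx (as saved by SYSCALL)	<-- %rsp points here
	 */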
	/*
	 * x86 lacks a near absolute jump, and we can't jump to the real
	 * entry text with a relative jump.  We could push the target
	 * address and then use retq, but this destroys the pipeline on
	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
	 * spill RDI and restore it in a second-stage trampoline.
	 */
	pushq	%rdi
	movq	$entry_SYSCALL_64_stage2, %rdi
	JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)

	.popsection

ENTRY(entry_SYSCALL_64_stage2)
	UNWIND_HINT_EMPTY
	popq	%rdi
	jmp	entry_SYSCALL_64_after_hwframe
END(entry_SYSCALL_64_stage2)

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/*
	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
	 * is not required to switch CR3.
	 */
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	TRACE_IRQS_OFF

	/* IRQs are off. */
	movq	%rsp, %rdi
	call	do_syscall_64		/* returns with IRQs disabled */

	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode
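
	/*
	 * Worked example (4-level paging, __VIRTUAL_MASK_SHIFT == 47, so
	 * the shift count is 16): a canonical RCX such as
	 * 0x00007fffffffe000 survives shl+sar unchanged, while a
	 * non-canonical 0x0000800000000000 becomes 0xffff800000000000,
	 * so the cmpq above fails and we fall back to IRET.
	 */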

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)
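
/*
 * Note: without paravirt, USERGS_SYSRET64 above expands directly to
 * swapgs + sysretq; with CONFIG_PARAVIRT it dispatches through pv ops
 * and, on bare metal, lands in native_usergs_sysret64 at the top of
 * this file, which is the same pair.
 */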

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
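
/*
 * Worked example of the encoding above, assuming FIRST_EXTERNAL_VECTOR
 * is 0x20: the first stub pushes $(~0x20 + 0x80) = $0x5f, which fits
 * in a sign-extended byte immediate, keeping each stub within its
 * 8-byte slot.  common_interrupt then adds -0x80, recovering
 * ~vector = -(vector + 1) in the [-256, -1] range noted there.
 */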

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm
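
/*
 * DEBUG_ENTRY_ASSERT_IRQS_OFF above is a debug-build tripwire: it reads
 * RFLAGS via SAVE_FLAGS and executes ud2 if IF is still set, i.e. if a
 * caller broke the interrupts-off contract of the IRQ-stack macros
 * that follow.
 */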

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
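
/*
 * These two macros are always used as a pair; for instance,
 * do_softirq_own_stack later in this file wraps its __do_softirq call
 * in ENTER_IRQ_STACK regs=0 old_rsp=%r11 ... LEAVE_IRQ_STACK regs=0,
 * and the Xen upcall path does the same with old_rsp=%r10.
 */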

/*
 * Switch to the thread stack.  This is called with the IRET frame and
 * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
 * space has not been allocated for them.)
 */
.macro DO_SWITCH_TO_THREAD_STACK
	pushq	%rdi
	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
.endm
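
/*
 * A note on the offsets above: after "pushq %rdi", the entry stack
 * holds, from (%rdi) upward, the stashed user RDI, the caller's return
 * address, orig_ax, and the five-word hardware IRET frame -- the upper
 * seven of which are the quadwords copied to the thread stack before
 * RDI is finally reloaded from (%rdi).
 */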

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 8(%rsp): ~(interrupt number) */
ENTRY(interrupt_entry)
	UNWIND_HINT_FUNC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS
	DO_SWITCH_TO_THREAD_STACK
1:

	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
END(interrupt_entry)

/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
	call	interrupt_entry

	UNWIND_HINT_REGS indirect=1
	call	\func	/* rdi points to pt_regs */
.endm
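
/*
 * Both common_interrupt below and the apicinterrupt stubs later in
 * this file funnel through this macro: every such vector expands to
 * "call interrupt_entry" followed by "call <its do_* handler>".
 */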

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN
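
/*
 * swapgs_restore_regs_and_return_to_usermode above is the common slow
 * exit: the opportunistic-SYSRET failure paths in entry_SYSCALL_64 and
 * the user branch of ret_from_fork all jump to it rather than
 * duplicating this trampoline-stack dance.
 */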

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
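
	/*
	 * Worked example (values illustrative): if the user RSP was
	 * 0x00007ffd1234abcd, RAX now holds 0x12340000 (andl cleared the
	 * upper 32 bits too), and or-ing in espfix_stack yields an RSP
	 * that still points into a read-only alias of the ESPFIX stack
	 * while bits 31:16 carry 0x1234 from the userspace RSP.
	 */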

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm
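
/*
 * apicinterrupt is apicinterrupt3 wrapped in the irqentry section
 * push/pop.  For example, "apicinterrupt LOCAL_TIMER_VECTOR
 * apic_timer_interrupt smp_apic_timer_interrupt" below generates
 * ENTRY(apic_timer_interrupt), which pushes ~LOCAL_TIMER_VECTOR and
 * reaches smp_apic_timer_interrupt via the common interrupt path.
 */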

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x)	PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)

#if defined(CONFIG_IA32_EMULATION)
/* entry_64_compat.S::entry_INT80_compat expects this to be an ASM function */
ENTRY(switch_to_thread_stack)
	UNWIND_HINT_FUNC

	DO_SWITCH_TO_THREAD_STACK

	ret
END(switch_to_thread_stack)
#endif
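
/*
 * A quick map of the idtentry parameters used below: has_error_code
 * says whether the CPU pushed an error code; paranoid=1 routes through
 * paranoid_entry/paranoid_exit (the slow, surefire GS check, usable
 * from IST); for paranoid < 2, entries that came from user mode divert
 * to the normal stack-switching path, while paranoid=2 (double_fault)
 * always takes the paranoid path; shift_ist moves the IST pointer down
 * by EXCEPTION_STKSZ around the handler call so a recursive #DB/#BP
 * gets a fresh stack window.
 */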

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	.if \paranoid < 2
	testb	$3, CS-ORIG_RAX(%rsp)		/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid < 2
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


/*
 * Reload gs selector with exception handling
 * edi: new selector
 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous
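
/*
 * Note on the fixup above: _ASM_EXTABLE registers .Lgs_change as a
 * faulting site, so a bad selector in "movl %edi, %gs" lands in bad_gs,
 * which loads a null selector into GS (first bouncing through __USER_DS
 * on CPUs with X86_BUG_NULL_SEG) and resumes at label 2.
 */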

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
*/ 1118905a36a2SIngo Molnar movq (%rsp), %rcx 1119905a36a2SIngo Molnar movq 8(%rsp), %r11 1120905a36a2SIngo Molnar addq $0x30, %rsp 11218c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1122905a36a2SIngo Molnar pushq $-1 /* orig_ax = -1 => not a system call */ 11233f01daecSDominik Brodowski PUSH_AND_CLEAR_REGS 1124946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 1125905a36a2SIngo Molnar jmp error_exit 1126905a36a2SIngo MolnarEND(xen_failsafe_callback) 1127905a36a2SIngo Molnar 1128905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1129905a36a2SIngo Molnar xen_hvm_callback_vector xen_evtchn_do_upcall 1130905a36a2SIngo Molnar 1131905a36a2SIngo Molnar#endif /* CONFIG_XEN */ 1132905a36a2SIngo Molnar 1133905a36a2SIngo Molnar#if IS_ENABLED(CONFIG_HYPERV) 1134905a36a2SIngo Molnarapicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1135905a36a2SIngo Molnar hyperv_callback_vector hyperv_vector_handler 113693286261SVitaly Kuznetsov 113793286261SVitaly Kuznetsovapicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \ 113893286261SVitaly Kuznetsov hyperv_reenlightenment_vector hyperv_reenlightenment_intr 1139905a36a2SIngo Molnar#endif /* CONFIG_HYPERV */ 1140905a36a2SIngo Molnar 1141905a36a2SIngo Molnaridtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1142905a36a2SIngo Molnaridtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1143905a36a2SIngo Molnaridtentry stack_segment do_stack_segment has_error_code=1 11444d732138SIngo Molnar 1145905a36a2SIngo Molnar#ifdef CONFIG_XEN 114643e41110SJuergen Grossidtentry xennmi do_nmi has_error_code=0 11475878d5d6SJuergen Grossidtentry xendebug do_debug has_error_code=0 11485878d5d6SJuergen Grossidtentry xenint3 do_int3 has_error_code=0 1149905a36a2SIngo Molnar#endif 11504d732138SIngo Molnar 1151905a36a2SIngo Molnaridtentry general_protection do_general_protection has_error_code=1 115211a7ffb0SThomas Gleixneridtentry page_fault do_page_fault has_error_code=1 11534d732138SIngo Molnar 1154905a36a2SIngo Molnar#ifdef CONFIG_KVM_GUEST 1155905a36a2SIngo Molnaridtentry async_page_fault do_async_page_fault has_error_code=1 1156905a36a2SIngo Molnar#endif 11574d732138SIngo Molnar 1158905a36a2SIngo Molnar#ifdef CONFIG_X86_MCE 11596f41c34dSThomas Gleixneridtentry machine_check do_mce has_error_code=0 paranoid=1 1160905a36a2SIngo Molnar#endif 1161905a36a2SIngo Molnar 1162905a36a2SIngo Molnar/* 11639e809d15SDominik Brodowski * Save all registers in pt_regs, and switch gs if needed. 1164905a36a2SIngo Molnar * Use slow, but surefire "are we in kernel?" check. 1165905a36a2SIngo Molnar * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1166905a36a2SIngo Molnar */ 1167905a36a2SIngo MolnarENTRY(paranoid_entry) 11688c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 1169905a36a2SIngo Molnar cld 11709e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1 11719e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8 1172905a36a2SIngo Molnar movl $1, %ebx 1173905a36a2SIngo Molnar movl $MSR_GS_BASE, %ecx 1174905a36a2SIngo Molnar rdmsr 1175905a36a2SIngo Molnar testl %edx, %edx 1176905a36a2SIngo Molnar js 1f /* negative -> in kernel */ 1177905a36a2SIngo Molnar SWAPGS 1178905a36a2SIngo Molnar xorl %ebx, %ebx 11798a09317bSDave Hansen 11808a09317bSDave Hansen1: 11818a09317bSDave Hansen SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 11828a09317bSDave Hansen 11838a09317bSDave Hansen ret 1184905a36a2SIngo MolnarEND(paranoid_entry) 1185905a36a2SIngo Molnar 1186905a36a2SIngo Molnar/* 1187905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. 
This is invoked
1188905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came
1189905a36a2SIngo Molnar * from kernel space.
1190905a36a2SIngo Molnar *
1191905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early
1192905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would
1193905a36a2SIngo Molnar * be complicated. Fortunately, there's no good reason
1194905a36a2SIngo Molnar * to try to handle preemption here.
11954d732138SIngo Molnar *
11964d732138SIngo Molnar * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1197905a36a2SIngo Molnar */
1198905a36a2SIngo MolnarENTRY(paranoid_exit)
11998c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
12002140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1201905a36a2SIngo Molnar TRACE_IRQS_OFF_DEBUG
1202905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1203e5317832SAndy Lutomirski jnz .Lparanoid_exit_no_swapgs
1204905a36a2SIngo Molnar TRACE_IRQS_IRETQ
120521e94459SPeter Zijlstra RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1206905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1207e5317832SAndy Lutomirski jmp .Lparanoid_exit_restore
1208e5317832SAndy Lutomirski.Lparanoid_exit_no_swapgs:
1209905a36a2SIngo Molnar TRACE_IRQS_IRETQ_DEBUG
1210e4865757SIngo Molnar RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1211e5317832SAndy Lutomirski.Lparanoid_exit_restore:
1212e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel
1213905a36a2SIngo MolnarEND(paranoid_exit)
1214905a36a2SIngo Molnar
1215905a36a2SIngo Molnar/*
12169e809d15SDominik Brodowski * Save all registers in pt_regs, and switch GS if needed.
1217539f5113SAndy Lutomirski * Return: EBX=0: came from user mode; EBX=1: otherwise
1218905a36a2SIngo Molnar */
1219905a36a2SIngo MolnarENTRY(error_entry)
12209e809d15SDominik Brodowski UNWIND_HINT_FUNC
1221905a36a2SIngo Molnar cld
12229e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1
12239e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8
1224905a36a2SIngo Molnar testb $3, CS+8(%rsp)
1225cb6f64edSAndy Lutomirski jz .Lerror_kernelspace
1226539f5113SAndy Lutomirski
1227cb6f64edSAndy Lutomirski /*
1228cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered
1229cb6f64edSAndy Lutomirski * from user mode due to an IRET fault.
1230cb6f64edSAndy Lutomirski */
1231905a36a2SIngo Molnar SWAPGS
12328a09317bSDave Hansen /* We have user CR3. Change to kernel CR3. */
12338a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1234539f5113SAndy Lutomirski
1235cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs:
12367f2590a1SAndy Lutomirski /* Put us onto the real thread stack. */
12377f2590a1SAndy Lutomirski popq %r12 /* save return addr in %r12 */
12387f2590a1SAndy Lutomirski movq %rsp, %rdi /* arg0 = pt_regs pointer */
12397f2590a1SAndy Lutomirski call sync_regs
12407f2590a1SAndy Lutomirski movq %rax, %rsp /* switch stack */
12417f2590a1SAndy Lutomirski ENCODE_FRAME_POINTER
12427f2590a1SAndy Lutomirski pushq %r12
12437f2590a1SAndy Lutomirski
1244f1075053SAndy Lutomirski /*
1245f1075053SAndy Lutomirski * We need to tell lockdep that IRQs are off. We can't do this until
1246f1075053SAndy Lutomirski * we fix gsbase, and we should do it before enter_from_user_mode
1247f1075053SAndy Lutomirski * (which can take locks).
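 *
 * In other words, the required order is (annotated outline of the
 * two lines below):
 *
 *	TRACE_IRQS_OFF			# lockdep: mark IRQs off first
 *	CALL_enter_from_user_mode	# may take locks; gsbase is now fixed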
1248f1075053SAndy Lutomirski */ 1249f1075053SAndy Lutomirski TRACE_IRQS_OFF 1250478dc89cSAndy Lutomirski CALL_enter_from_user_mode 1251f1075053SAndy Lutomirski ret 125202bc7768SAndy Lutomirski 1253cb6f64edSAndy Lutomirski.Lerror_entry_done: 1254905a36a2SIngo Molnar TRACE_IRQS_OFF 1255905a36a2SIngo Molnar ret 1256905a36a2SIngo Molnar 1257905a36a2SIngo Molnar /* 1258905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with 1259905a36a2SIngo Molnar * usergs. Handle them here. B stepping K8s sometimes report a 1260905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check 1261905a36a2SIngo Molnar * for these here too. 1262905a36a2SIngo Molnar */ 1263cb6f64edSAndy Lutomirski.Lerror_kernelspace: 1264905a36a2SIngo Molnar incl %ebx 1265905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx 1266905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp) 1267cb6f64edSAndy Lutomirski je .Lerror_bad_iret 1268905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */ 1269905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp) 1270cb6f64edSAndy Lutomirski je .Lbstep_iret 127142c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp) 1272cb6f64edSAndy Lutomirski jne .Lerror_entry_done 1273539f5113SAndy Lutomirski 1274539f5113SAndy Lutomirski /* 127542c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 1276539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in 127742c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase. 1278539f5113SAndy Lutomirski */ 12792fa5f04fSWanpeng Li SWAPGS 12808a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 12812fa5f04fSWanpeng Li jmp .Lerror_entry_done 1282905a36a2SIngo Molnar 1283cb6f64edSAndy Lutomirski.Lbstep_iret: 1284905a36a2SIngo Molnar /* Fix truncated RIP */ 1285905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp) 1286905a36a2SIngo Molnar /* fall through */ 1287905a36a2SIngo Molnar 1288cb6f64edSAndy Lutomirski.Lerror_bad_iret: 1289539f5113SAndy Lutomirski /* 12908a09317bSDave Hansen * We came from an IRET to user mode, so we have user 12918a09317bSDave Hansen * gsbase and CR3. Switch to kernel gsbase and CR3: 1292539f5113SAndy Lutomirski */ 1293905a36a2SIngo Molnar SWAPGS 12948a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1295539f5113SAndy Lutomirski 1296539f5113SAndy Lutomirski /* 1297539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs 1298539f5113SAndy Lutomirski * as if we faulted immediately after IRET and clear EBX so that 1299539f5113SAndy Lutomirski * error_exit knows that we will be returning to user mode. 
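 *
 * As used below (an outline of the calling convention, matching the
 * asm that follows): %rdi carries the current stack pointer in and
 * %rax carries the fixed-up one out:
 *
 *	mov	%rsp, %rdi		# arg: old pt_regs
 *	call	fixup_bad_iret		# copies the frame to a safe stack
 *	mov	%rax, %rsp		# switch to the returned pt_regs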
1300539f5113SAndy Lutomirski */
1301905a36a2SIngo Molnar mov %rsp, %rdi
1302905a36a2SIngo Molnar call fixup_bad_iret
1303905a36a2SIngo Molnar mov %rax, %rsp
1304539f5113SAndy Lutomirski decl %ebx
1305cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs
1306905a36a2SIngo MolnarEND(error_entry)
1307905a36a2SIngo Molnar
1308905a36a2SIngo Molnar
1309539f5113SAndy Lutomirski/*
131075ca5b22SNicolas Iooss * On entry, EBX is a "return to kernel mode" flag:
1311539f5113SAndy Lutomirski * 1: already in kernel mode, don't need SWAPGS
1312539f5113SAndy Lutomirski * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1313539f5113SAndy Lutomirski */
1314905a36a2SIngo MolnarENTRY(error_exit)
13158c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
13162140a994SJan Beulich DISABLE_INTERRUPTS(CLBR_ANY)
1317905a36a2SIngo Molnar TRACE_IRQS_OFF
13182140a994SJan Beulich testl %ebx, %ebx
1319905a36a2SIngo Molnar jnz retint_kernel
1320905a36a2SIngo Molnar jmp retint_user
1321905a36a2SIngo MolnarEND(error_exit)
1322905a36a2SIngo Molnar
1323929bacecSAndy Lutomirski/*
1324929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all,
1325929bacecSAndy Lutomirski * so we can use real assembly here.
13268a09317bSDave Hansen *
13278a09317bSDave Hansen * Registers:
13288a09317bSDave Hansen * %r14: Used to save/restore the CR3 of the interrupted context
13298a09317bSDave Hansen * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1330929bacecSAndy Lutomirski */
1331905a36a2SIngo MolnarENTRY(nmi)
13328c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1333929bacecSAndy Lutomirski
1334fc57a7c6SAndy Lutomirski /*
1335905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then
1336905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context.
1337905a36a2SIngo Molnar * This means that we can have nested NMIs where the next
1338905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We
1339905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the
1340905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant
1341905a36a2SIngo Molnar * anyway.
1342905a36a2SIngo Molnar *
1343905a36a2SIngo Molnar * To handle this case we do the following:
1344905a36a2SIngo Molnar * Check a special location on the stack that contains
1345905a36a2SIngo Molnar * a variable that is set when NMIs are executing.
1346905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it
1347905a36a2SIngo Molnar * is an NMI stack.
1348905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI
1349905a36a2SIngo Molnar * stack then:
1350905a36a2SIngo Molnar * o Set the special variable on the stack
13510b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the
13520b22930eSAndy Lutomirski * stack
13530b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack
1354905a36a2SIngo Molnar * o Continue processing the NMI
1355905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack:
13560b22930eSAndy Lutomirski * o Modify the "iret" location to jump to repeat_nmi
1357905a36a2SIngo Molnar * o return to the first NMI
1358905a36a2SIngo Molnar *
1359905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable.
1360905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that it is
1361905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was
1362905a36a2SIngo Molnar * a nested NMI that updated the copied interrupt stack frame, a
1363905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second
1364905a36a2SIngo Molnar * NMI.
13659b6e6a83SAndy Lutomirski *
13669b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace
13679b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode
13689b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like
13699b6e6a83SAndy Lutomirski * other IST entries.
1370905a36a2SIngo Molnar */
1371905a36a2SIngo Molnar
1372e93c1730SAndy Lutomirski ASM_CLAC
1373e93c1730SAndy Lutomirski
1374905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */
1375905a36a2SIngo Molnar pushq %rdx
1376905a36a2SIngo Molnar
13779b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp)
13789b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel
1379905a36a2SIngo Molnar
1380905a36a2SIngo Molnar /*
13819b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we
13829b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and
13839b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end
13849b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs
13859b6e6a83SAndy Lutomirski * are off.
138683c133cfSAndy Lutomirski *
138783c133cfSAndy Lutomirski * We also must not push anything to the stack before switching
138883c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable.
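 *
 * Hence the ordering below (illustrative outline):
 *
 *	swapgs				# kernel gsbase first
 *	SWITCH_TO_KERNEL_CR3 ...	# then kernel CR3
 *	movq	<thread stack>, %rsp	# then leave the NMI stack
 *	pushq	...			# only now is pushing safe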
13899b6e6a83SAndy Lutomirski */ 13909b6e6a83SAndy Lutomirski 1391929bacecSAndy Lutomirski swapgs 13929b6e6a83SAndy Lutomirski cld 13938a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 13949b6e6a83SAndy Lutomirski movq %rsp, %rdx 13959b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 13968c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8 13979b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */ 13989b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */ 13999b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */ 14009b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */ 14019b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */ 14028c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 14039b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */ 140430907fd1SDominik Brodowski PUSH_AND_CLEAR_REGS rdx=(%rdx) 1405946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 14069b6e6a83SAndy Lutomirski 14079b6e6a83SAndy Lutomirski /* 14089b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage 14099b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're 14109b6e6a83SAndy Lutomirski * done with the NMI stack. 14119b6e6a83SAndy Lutomirski */ 14129b6e6a83SAndy Lutomirski 14139b6e6a83SAndy Lutomirski movq %rsp, %rdi 14149b6e6a83SAndy Lutomirski movq $-1, %rsi 14159b6e6a83SAndy Lutomirski call do_nmi 14169b6e6a83SAndy Lutomirski 14179b6e6a83SAndy Lutomirski /* 14189b6e6a83SAndy Lutomirski * Return back to user mode. We must *not* do the normal exit 1419946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts. 14209b6e6a83SAndy Lutomirski */ 14218a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode 14229b6e6a83SAndy Lutomirski 14239b6e6a83SAndy Lutomirski.Lnmi_from_kernel: 14249b6e6a83SAndy Lutomirski /* 14250b22930eSAndy Lutomirski * Here's what our stack frame will look like: 14260b22930eSAndy Lutomirski * +---------------------------------------------------------+ 14270b22930eSAndy Lutomirski * | original SS | 14280b22930eSAndy Lutomirski * | original Return RSP | 14290b22930eSAndy Lutomirski * | original RFLAGS | 14300b22930eSAndy Lutomirski * | original CS | 14310b22930eSAndy Lutomirski * | original RIP | 14320b22930eSAndy Lutomirski * +---------------------------------------------------------+ 14330b22930eSAndy Lutomirski * | temp storage for rdx | 14340b22930eSAndy Lutomirski * +---------------------------------------------------------+ 14350b22930eSAndy Lutomirski * | "NMI executing" variable | 14360b22930eSAndy Lutomirski * +---------------------------------------------------------+ 14370b22930eSAndy Lutomirski * | iret SS } Copied from "outermost" frame | 14380b22930eSAndy Lutomirski * | iret Return RSP } on each loop iteration; overwritten | 14390b22930eSAndy Lutomirski * | iret RFLAGS } by a nested NMI to force another | 14400b22930eSAndy Lutomirski * | iret CS } iteration if needed. | 14410b22930eSAndy Lutomirski * | iret RIP } | 14420b22930eSAndy Lutomirski * +---------------------------------------------------------+ 14430b22930eSAndy Lutomirski * | outermost SS } initialized in first_nmi; | 14440b22930eSAndy Lutomirski * | outermost Return RSP } will not be changed before | 14450b22930eSAndy Lutomirski * | outermost RFLAGS } NMI processing is done. | 14460b22930eSAndy Lutomirski * | outermost CS } Copied to "iret" frame on each | 14470b22930eSAndy Lutomirski * | outermost RIP } iteration. 
|
14480b22930eSAndy Lutomirski * +---------------------------------------------------------+
14490b22930eSAndy Lutomirski * | pt_regs |
14500b22930eSAndy Lutomirski * +---------------------------------------------------------+
14510b22930eSAndy Lutomirski *
14520b22930eSAndy Lutomirski * The "original" frame is used by hardware. Before re-enabling
14530b22930eSAndy Lutomirski * NMIs, we need to be done with it, and we need to leave enough
14540b22930eSAndy Lutomirski * space for the asm code here.
14550b22930eSAndy Lutomirski *
14560b22930eSAndy Lutomirski * We return by executing IRET while RSP points to the "iret" frame.
14570b22930eSAndy Lutomirski * That will either return for real or it will loop back into NMI
14580b22930eSAndy Lutomirski * processing.
14590b22930eSAndy Lutomirski *
14600b22930eSAndy Lutomirski * The "outermost" frame is copied to the "iret" frame on each
14610b22930eSAndy Lutomirski * iteration of the loop, so each iteration starts with the "iret"
14620b22930eSAndy Lutomirski * frame pointing to the final return target.
14630b22930eSAndy Lutomirski */
14640b22930eSAndy Lutomirski
14650b22930eSAndy Lutomirski /*
14660b22930eSAndy Lutomirski * Determine whether we're a nested NMI.
14670b22930eSAndy Lutomirski *
1468a27507caSAndy Lutomirski * If we interrupted kernel code between repeat_nmi and
1469a27507caSAndy Lutomirski * end_repeat_nmi, then we are a nested NMI. We must not
1470a27507caSAndy Lutomirski * modify the "iret" frame because it's being written by
1471a27507caSAndy Lutomirski * the outer NMI. That's okay; the outer NMI handler is
1472a27507caSAndy Lutomirski * about to call do_nmi anyway, so we can just
1473a27507caSAndy Lutomirski * resume the outer NMI.
1474a27507caSAndy Lutomirski */
1475a27507caSAndy Lutomirski
1476a27507caSAndy Lutomirski movq $repeat_nmi, %rdx
1477a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1478a27507caSAndy Lutomirski ja 1f
1479a27507caSAndy Lutomirski movq $end_repeat_nmi, %rdx
1480a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx
1481a27507caSAndy Lutomirski ja nested_nmi_out
1482a27507caSAndy Lutomirski1:
1483a27507caSAndy Lutomirski
1484a27507caSAndy Lutomirski /*
1485a27507caSAndy Lutomirski * Now check "NMI executing". If it's set, then we're nested.
14860b22930eSAndy Lutomirski * This will not detect if we interrupted an outer NMI just
14870b22930eSAndy Lutomirski * before IRET.
1488905a36a2SIngo Molnar */
1489905a36a2SIngo Molnar cmpl $1, -8(%rsp)
1490905a36a2SIngo Molnar je nested_nmi
1491905a36a2SIngo Molnar
1492905a36a2SIngo Molnar /*
14930b22930eSAndy Lutomirski * Now test if the previous stack was an NMI stack. This covers
14940b22930eSAndy Lutomirski * the case where we interrupt an outer NMI after it clears
1495810bc075SAndy Lutomirski * "NMI executing" but before IRET. We need to be careful, though:
1496810bc075SAndy Lutomirski * there is one case in which RSP could point to the NMI stack
1497810bc075SAndy Lutomirski * despite there being no NMI active: naughty userspace controls
1498810bc075SAndy Lutomirski * RSP at the very beginning of the SYSCALL targets. We can
1499810bc075SAndy Lutomirski * pull a fast one on naughty userspace, though: we program
1500810bc075SAndy Lutomirski * SYSCALL to mask DF, so userspace cannot cause DF to be set
1501810bc075SAndy Lutomirski * if it controls the kernel's RSP. We set DF before we clear
1502810bc075SAndy Lutomirski * "NMI executing".
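 *
 * Schematically, the check performed below is (illustrative):
 *
 *	<RSP within the NMI stack?>	# no  -> treat as first NMI
 *	<EFLAGS.DF set in the frame?>	# no  -> RSP was user controlled,
 *					#        also treat as first NMI
 *	<otherwise>			# genuinely nested NMI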
1503905a36a2SIngo Molnar */ 1504905a36a2SIngo Molnar lea 6*8(%rsp), %rdx 1505905a36a2SIngo Molnar /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1506905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1507905a36a2SIngo Molnar /* If the stack pointer is above the NMI stack, this is a normal NMI */ 1508905a36a2SIngo Molnar ja first_nmi 15094d732138SIngo Molnar 1510905a36a2SIngo Molnar subq $EXCEPTION_STKSZ, %rdx 1511905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1512905a36a2SIngo Molnar /* If it is below the NMI stack, it is a normal NMI */ 1513905a36a2SIngo Molnar jb first_nmi 1514810bc075SAndy Lutomirski 1515810bc075SAndy Lutomirski /* Ah, it is within the NMI stack. */ 1516810bc075SAndy Lutomirski 1517810bc075SAndy Lutomirski testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) 1518810bc075SAndy Lutomirski jz first_nmi /* RSP was user controlled. */ 1519810bc075SAndy Lutomirski 1520810bc075SAndy Lutomirski /* This is a nested NMI. */ 1521905a36a2SIngo Molnar 1522905a36a2SIngo Molnarnested_nmi: 1523905a36a2SIngo Molnar /* 15240b22930eSAndy Lutomirski * Modify the "iret" frame to point to repeat_nmi, forcing another 15250b22930eSAndy Lutomirski * iteration of NMI handling. 1526905a36a2SIngo Molnar */ 152723a781e9SAndy Lutomirski subq $8, %rsp 1528905a36a2SIngo Molnar leaq -10*8(%rsp), %rdx 1529905a36a2SIngo Molnar pushq $__KERNEL_DS 1530905a36a2SIngo Molnar pushq %rdx 1531905a36a2SIngo Molnar pushfq 1532905a36a2SIngo Molnar pushq $__KERNEL_CS 1533905a36a2SIngo Molnar pushq $repeat_nmi 1534905a36a2SIngo Molnar 1535905a36a2SIngo Molnar /* Put stack back */ 1536905a36a2SIngo Molnar addq $(6*8), %rsp 1537905a36a2SIngo Molnar 1538905a36a2SIngo Molnarnested_nmi_out: 1539905a36a2SIngo Molnar popq %rdx 1540905a36a2SIngo Molnar 15410b22930eSAndy Lutomirski /* We are returning to kernel mode, so this cannot result in a fault. */ 1542929bacecSAndy Lutomirski iretq 1543905a36a2SIngo Molnar 1544905a36a2SIngo Molnarfirst_nmi: 15450b22930eSAndy Lutomirski /* Restore rdx. */ 1546905a36a2SIngo Molnar movq (%rsp), %rdx 1547905a36a2SIngo Molnar 154836f1a77bSAndy Lutomirski /* Make room for "NMI executing". */ 154936f1a77bSAndy Lutomirski pushq $0 1550905a36a2SIngo Molnar 15510b22930eSAndy Lutomirski /* Leave room for the "iret" frame */ 1552905a36a2SIngo Molnar subq $(5*8), %rsp 1553905a36a2SIngo Molnar 15540b22930eSAndy Lutomirski /* Copy the "original" frame to the "outermost" frame */ 1555905a36a2SIngo Molnar .rept 5 1556905a36a2SIngo Molnar pushq 11*8(%rsp) 1557905a36a2SIngo Molnar .endr 15588c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1559905a36a2SIngo Molnar 1560905a36a2SIngo Molnar /* Everything up to here is safe from nested NMIs */ 1561905a36a2SIngo Molnar 1562a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 1563a97439aaSAndy Lutomirski /* 1564a97439aaSAndy Lutomirski * For ease of testing, unmask NMIs right away. Disabled by 1565a97439aaSAndy Lutomirski * default because IRET is very expensive. 
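 *
 * The frame built below is an iret-to-self (illustrative outline;
 * the SS/RSP/RFLAGS pushes are shown in full in the code):
 *
 *	pushq	$__KERNEL_CS		# CS
 *	pushq	$1f			# RIP = label right after iretq
 *	iretq				# ends the NMI-masked window
 *	1:				# execution continues here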
1566a97439aaSAndy Lutomirski */
1567a97439aaSAndy Lutomirski pushq $0 /* SS */
1568a97439aaSAndy Lutomirski pushq %rsp /* RSP (minus 8 because of the previous push) */
1569a97439aaSAndy Lutomirski addq $8, (%rsp) /* Fix up RSP */
1570a97439aaSAndy Lutomirski pushfq /* RFLAGS */
1571a97439aaSAndy Lutomirski pushq $__KERNEL_CS /* CS */
1572a97439aaSAndy Lutomirski pushq $1f /* RIP */
1573929bacecSAndy Lutomirski iretq /* continues at repeat_nmi below */
15748c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS
1575a97439aaSAndy Lutomirski1:
1576a97439aaSAndy Lutomirski#endif
1577a97439aaSAndy Lutomirski
15780b22930eSAndy Lutomirskirepeat_nmi:
1579905a36a2SIngo Molnar /*
1580905a36a2SIngo Molnar * If there was a nested NMI, the first NMI's iret will return
1581905a36a2SIngo Molnar * here. But NMIs are still enabled and we can take another
1582905a36a2SIngo Molnar * nested NMI. The nested NMI checks the interrupted RIP to see
1583905a36a2SIngo Molnar * if it is between repeat_nmi and end_repeat_nmi, and if so
1584905a36a2SIngo Molnar * it will just return, as we are about to repeat an NMI anyway.
1585905a36a2SIngo Molnar * This makes it safe to copy to the stack frame that a nested
1586905a36a2SIngo Molnar * NMI will update.
15870b22930eSAndy Lutomirski *
15880b22930eSAndy Lutomirski * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
15890b22930eSAndy Lutomirski * we're repeating an NMI, gsbase has the same value that it had on
15900b22930eSAndy Lutomirski * the first iteration. paranoid_entry will load the kernel
159136f1a77bSAndy Lutomirski * gsbase if needed before we call do_nmi. "NMI executing"
159236f1a77bSAndy Lutomirski * is zero.
1593905a36a2SIngo Molnar */
159436f1a77bSAndy Lutomirski movq $1, 10*8(%rsp) /* Set "NMI executing". */
1595905a36a2SIngo Molnar
15960b22930eSAndy Lutomirski /*
15970b22930eSAndy Lutomirski * Copy the "outermost" frame to the "iret" frame. NMIs that nest
15980b22930eSAndy Lutomirski * here must not modify the "iret" frame while we're writing to
15990b22930eSAndy Lutomirski * it or it will end up containing garbage.
16000b22930eSAndy Lutomirski */
1601905a36a2SIngo Molnar addq $(10*8), %rsp
1602905a36a2SIngo Molnar .rept 5
1603905a36a2SIngo Molnar pushq -6*8(%rsp)
1604905a36a2SIngo Molnar .endr
1605905a36a2SIngo Molnar subq $(5*8), %rsp
1606905a36a2SIngo Molnarend_repeat_nmi:
1607905a36a2SIngo Molnar
1608905a36a2SIngo Molnar /*
16090b22930eSAndy Lutomirski * Everything below this point can be preempted by a nested NMI.
16100b22930eSAndy Lutomirski * If this happens, then the inner NMI will change the "iret"
16110b22930eSAndy Lutomirski * frame to point back to repeat_nmi.
1612905a36a2SIngo Molnar */
1613905a36a2SIngo Molnar pushq $-1 /* ORIG_RAX: no syscall to restart */
1614905a36a2SIngo Molnar
1615905a36a2SIngo Molnar /*
1616905a36a2SIngo Molnar * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1617905a36a2SIngo Molnar * as we should not be calling schedule in NMI context,
1618905a36a2SIngo Molnar * even with normal interrupts enabled. An NMI should not be
1619905a36a2SIngo Molnar * setting NEED_RESCHED or anything that normal interrupts and
1620905a36a2SIngo Molnar * exceptions might do.
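 *
 * The %ebx contract from paranoid_entry is consumed at nmi_restore
 * below (annotated outline):
 *
 *	call	paranoid_entry		# %ebx=1: gsbase was already kernel's
 *	...
 *	testl	%ebx, %ebx		# swapgs needed?
 *	jnz	nmi_restore		# skip SWAPGS when %ebx=1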
1621905a36a2SIngo Molnar */
1622905a36a2SIngo Molnar call paranoid_entry
16238c1f7558SJosh Poimboeuf UNWIND_HINT_REGS
1624905a36a2SIngo Molnar
1625905a36a2SIngo Molnar /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1626905a36a2SIngo Molnar movq %rsp, %rdi
1627905a36a2SIngo Molnar movq $-1, %rsi
1628905a36a2SIngo Molnar call do_nmi
1629905a36a2SIngo Molnar
163021e94459SPeter Zijlstra RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
16318a09317bSDave Hansen
1632905a36a2SIngo Molnar testl %ebx, %ebx /* swapgs needed? */
1633905a36a2SIngo Molnar jnz nmi_restore
1634905a36a2SIngo Molnarnmi_swapgs:
1635905a36a2SIngo Molnar SWAPGS_UNSAFE_STACK
1636905a36a2SIngo Molnarnmi_restore:
1637502af0d7SDominik Brodowski POP_REGS
16380b22930eSAndy Lutomirski
1639471ee483SAndy Lutomirski /*
1640471ee483SAndy Lutomirski * Skip orig_ax and the "outermost" frame to point RSP at the
1641471ee483SAndy Lutomirski * "iret" frame.
1642471ee483SAndy Lutomirski */
1643471ee483SAndy Lutomirski addq $6*8, %rsp
1644905a36a2SIngo Molnar
1645810bc075SAndy Lutomirski /*
1646810bc075SAndy Lutomirski * Clear "NMI executing". Set DF first so that we can easily
1647810bc075SAndy Lutomirski * distinguish the remaining code between here and IRET from
1648929bacecSAndy Lutomirski * the SYSCALL entry and exit paths.
1649929bacecSAndy Lutomirski *
1650929bacecSAndy Lutomirski * We arguably should just inspect RIP instead, but I (Andy) wrote
1651929bacecSAndy Lutomirski * this code when I had the misapprehension that Xen PV supported
1652929bacecSAndy Lutomirski * NMIs, and Xen PV would break that approach.
1653810bc075SAndy Lutomirski */
1654810bc075SAndy Lutomirski std
1655810bc075SAndy Lutomirski movq $0, 5*8(%rsp) /* clear "NMI executing" */
16560b22930eSAndy Lutomirski
16570b22930eSAndy Lutomirski /*
1658929bacecSAndy Lutomirski * iretq reads the "iret" frame and exits the NMI stack in a
1659929bacecSAndy Lutomirski * single instruction. We are returning to kernel mode, so this
1660929bacecSAndy Lutomirski * cannot result in a fault. Similarly, we don't need to worry
1661929bacecSAndy Lutomirski * about espfix64 on the way back to kernel mode.
16620b22930eSAndy Lutomirski */
1663929bacecSAndy Lutomirski iretq
1664905a36a2SIngo MolnarEND(nmi)
1665905a36a2SIngo Molnar
1666905a36a2SIngo MolnarENTRY(ignore_sysret)
16678c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY
1668905a36a2SIngo Molnar mov $-ENOSYS, %eax
1669905a36a2SIngo Molnar sysret
1670905a36a2SIngo MolnarEND(ignore_sysret)
16712deb4be2SAndy Lutomirski
16722deb4be2SAndy LutomirskiENTRY(rewind_stack_do_exit)
16738c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC
16742deb4be2SAndy Lutomirski /* Prevent any naive code from trying to unwind to our caller. */
16752deb4be2SAndy Lutomirski xorl %ebp, %ebp
16762deb4be2SAndy Lutomirski
16772deb4be2SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
16788c1f7558SJosh Poimboeuf leaq -PTREGS_SIZE(%rax), %rsp
16798c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
16802deb4be2SAndy Lutomirski
16812deb4be2SAndy Lutomirski call do_exit
16822deb4be2SAndy LutomirskiEND(rewind_stack_do_exit)
1683
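/*
 * For reference, the "rewind" pattern used by rewind_stack_do_exit in
 * isolation: reset %rsp to a known-good stack, clear the frame pointer
 * so unwinders stop here, then call a function that never returns.
 * This is an illustrative, self-contained sketch kept inside a comment
 * so it does not assemble into the image; it is not in-tree code, and
 * stack_top and fatal_path are hypothetical symbols:
 *
 *	.text
 *	.globl	rewind_and_call
 * rewind_and_call:
 *	xorl	%ebp, %ebp		# no caller frame to unwind to
 *	movq	stack_top(%rip), %rsp	# hypothetical fresh stack top
 *	call	fatal_path		# hypothetical, never returns
 */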