/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
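/*
 * Illustration (a hedged userspace sketch, not part of the kernel): a
 * libc-style write(2) would arrive at this entry point with the
 * registers set up roughly as follows ("buf" and "len" are
 * hypothetical user symbols):
 *
 *	movq	$1, %rax		# __NR_write on x86-64
 *	movq	$1, %rdi		# arg0: fd
 *	leaq	buf(%rip), %rsi		# arg1: buffer
 *	movq	$len, %rdx		# arg2: count
 *	syscall				# rcx := rip, r11 := rflags
 */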
SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
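	/*
	 * A hedged sketch of the frame that now sits on the task stack
	 * (struct pt_regs; see arch/x86/include/asm/ptrace.h), higher
	 * addresses first:
	 *
	 *	ss, rsp, rflags, cs, rip	<- hand-built "iret frame"
	 *	orig_ax				<- syscall number
	 *	rdi ... r15			<- PUSH_AND_CLEAR_REGS, with
	 *					   pt_regs->ax preset to -ENOSYS
	 */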
	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 * In the Xen PV case we must use iret anyway.
	 */

	ALTERNATIVE "", "jmp	swapgs_restore_regs_and_return_to_usermode", \
		X86_FEATURE_XENPV

	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match the most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
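	/*
	 * Worked example of the sign-extension above (assuming 4-level
	 * paging, i.e. __VIRTUAL_MASK_SHIFT == 47, so a shift count of 16):
	 *
	 *	rcx = 0x00007fffffffffff -> shl/sar leave it unchanged
	 *	rcx = 0x0000800000000000 -> becomes 0xffff800000000000
	 *
	 * The second value no longer equals the saved RIP in r11, so the
	 * compare that follows sends it down the IRET path.
	 */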
	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	swapgs
	sysretq
SYM_CODE_END(entry_SYSCALL_64)
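/*
 * Hedged sketch of the trampoline stack just before the SYSRET above
 * (higher addresses first):
 *
 *	user RSP	<- pushq RSP-RDI(%rdi)
 *	user RDI	<- pushq (%rdi); %rsp points here
 *
 * The two pops leave every register holding its user value and RSP
 * already pointing back at the user stack, so nothing user-visible
 * remains on the per-cpu trampoline stack when SYSRET executes.
 */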
/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
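	/*
	 * For reference (a hedged sketch of asm/switch_to.h): struct
	 * inactive_task_frame lays out r15, r14, r13, r12, bx, bp and
	 * then ret_addr, lowest address first.  The pushes above build
	 * exactly that frame (last push ends up lowest), and the pops
	 * below must consume the new task's frame in the same order.
	 */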
	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1		/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif
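	/*
	 * Hedged illustration of the gap trick above: each of the six
	 * "pushq 5*8(%rsp)" copies the word five slots above the current
	 * top, so the 6-word frame
	 *
	 *	[-1][RIP][CS][RFLAGS][RSP][SS]
	 *
	 * gets duplicated 48 bytes lower; the stale copy above it is the
	 * "gap" into which the int3 handler can write a return address,
	 * as if a real CALL had pushed it.
	 */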
	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm

/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

#ifdef CONFIG_AMD_MEM_ENCRYPT
/**
 * idtentry_vc - Macro to generate entry stub for #VC
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #VC. The #VC handler
 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
 *
 * To make this work the #VC entry code tries its best to pretend it doesn't use
 * an IST stack by switching to the task stack if coming from user-space (which
 * includes the early SYSCALL entry path) or back to the stack in the IRET frame
 * if entered from kernel-mode.
 *
 * If entered from kernel-mode the return stack is validated first, and if it is
 * not safe to use (e.g. because it points to the entry stack) the #VC handler
 * will switch to a fall-back stack (VC2) and call a special handler function.
 *
 * The macro is only used for one vector, but it is planned to be extended in
 * the future for the #HV exception.
 */
.macro idtentry_vc vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry

	UNWIND_HINT_REGS

	/*
	 * Switch off the IST stack to make it free for nested exceptions. The
	 * vc_switch_off_ist() function will switch back to the interrupted
	 * stack if it is safe to do so. If not it switches to the VC fall-back
	 * stack.
	 */
	movq	%rsp, %rdi		/* pt_regs pointer */
	call	vc_switch_off_ist
	movq	%rax, %rsp		/* Switch to new stack */

	UNWIND_HINT_REGS

	/* Update pt_regs */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	/*
	 * No need to switch back to the IST stack. The current stack is either
	 * identical to the stack in the IRET frame or the VC fall-back stack,
	 * so it is definitely mapped even with PTI enabled.
	 */
	jmp	paranoid_exit

	/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body safe_stack_\cfunc, has_error_code=1

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
#endif

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
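	/*
	 * Hedged sketch of the trampoline stack at this point (higher
	 * addresses first):
	 *
	 *	SS, RSP, EFLAGS, CS, RIP	<- copied iret frame
	 *	user RDI			<- %rsp
	 *
	 * The frame had to be copied to this per-cpu trampoline stack
	 * before the CR3 switch above, because with PTI the task stack
	 * is not mapped in the user page tables.
	 */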
	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	swapgs					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	swapgs					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
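/*
 * Worked example of the alias arithmetic above (hedged, with a made-up
 * user RSP): if the userspace RSP was 0x00cafe68, then
 *
 *	RAX = 0x00cafe68 & 0xffff0000 = 0x00ca0000
 *	RSP = espfix_stack | 0x00ca0000
 *
 * so the bits an IRET through a 16-bit SS leaves behind in RSP[31:16]
 * are the user's own 0x00ca rather than kernel stack bits, while RSP
 * still addresses the read-only alias of the ESPFIX page.
 */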
71685063facSAndy Lutomirski */ 717905a36a2SIngo Molnar jmp native_irq_return_iret 718905a36a2SIngo Molnar#endif 719fa5e5c40SThomas GleixnerSYM_CODE_END(common_interrupt_return) 720fa5e5c40SThomas Gleixner_ASM_NOKPROBE(common_interrupt_return) 721905a36a2SIngo Molnar 722905a36a2SIngo Molnar/* 7234d732138SIngo Molnar * Reload gs selector with exception handling 7244d732138SIngo Molnar * edi: new selector 725b9f6976bSThomas Gleixner * 726b9f6976bSThomas Gleixner * Is in entry.text as it shouldn't be instrumented. 7274d732138SIngo Molnar */ 728410367e3SThomas GleixnerSYM_FUNC_START(asm_load_gs_index) 7298c1f7558SJosh Poimboeuf FRAME_BEGIN 730c9317202SThomas Gleixner swapgs 73142c748bbSBorislav Petkov.Lgs_change: 732905a36a2SIngo Molnar movl %edi, %gs 73396e5d28aSBorislav Petkov2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE 734c9317202SThomas Gleixner swapgs 7358c1f7558SJosh Poimboeuf FRAME_END 736905a36a2SIngo Molnar ret 737410367e3SThomas GleixnerSYM_FUNC_END(asm_load_gs_index) 738410367e3SThomas GleixnerEXPORT_SYMBOL(asm_load_gs_index) 739905a36a2SIngo Molnar 74098ededb6SJiri Slaby _ASM_EXTABLE(.Lgs_change, .Lbad_gs) 741905a36a2SIngo Molnar .section .fixup, "ax" 742905a36a2SIngo Molnar /* running with kernelgs */ 743ef77e688SJiri SlabySYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs) 744c9317202SThomas Gleixner swapgs /* switch back to user gs */ 745b038c842SAndy Lutomirski.macro ZAP_GS 746b038c842SAndy Lutomirski /* This can't be a string because the preprocessor needs to see it. */ 747b038c842SAndy Lutomirski movl $__USER_DS, %eax 748b038c842SAndy Lutomirski movl %eax, %gs 749b038c842SAndy Lutomirski.endm 750b038c842SAndy Lutomirski ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG 751905a36a2SIngo Molnar xorl %eax, %eax 752905a36a2SIngo Molnar movl %eax, %gs 753905a36a2SIngo Molnar jmp 2b 754ef77e688SJiri SlabySYM_CODE_END(.Lbad_gs) 755905a36a2SIngo Molnar .previous 756905a36a2SIngo Molnar 757931b9414SThomas Gleixner/* 758931b9414SThomas Gleixner * rdi: New stack pointer points to the top word of the stack 759931b9414SThomas Gleixner * rsi: Function pointer 760931b9414SThomas Gleixner * rdx: Function argument (can be NULL if none) 761931b9414SThomas Gleixner */ 762931b9414SThomas GleixnerSYM_FUNC_START(asm_call_on_stack) 763a7b3474cSThomas GleixnerSYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL) 764a7b3474cSThomas GleixnerSYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) 765931b9414SThomas Gleixner /* 766931b9414SThomas Gleixner * Save the frame pointer unconditionally. This allows the ORC 767931b9414SThomas Gleixner * unwinder to handle the stack switch. 768931b9414SThomas Gleixner */ 769931b9414SThomas Gleixner pushq %rbp 770931b9414SThomas Gleixner mov %rsp, %rbp 771931b9414SThomas Gleixner 772931b9414SThomas Gleixner /* 773931b9414SThomas Gleixner * The unwinder relies on the word at the top of the new stack 774931b9414SThomas Gleixner * page linking back to the previous RSP. 775931b9414SThomas Gleixner */ 776931b9414SThomas Gleixner mov %rsp, (%rdi) 777931b9414SThomas Gleixner mov %rdi, %rsp 778931b9414SThomas Gleixner /* Move the argument to the right place */ 779931b9414SThomas Gleixner mov %rdx, %rdi 780931b9414SThomas Gleixner 781931b9414SThomas Gleixner1: 782931b9414SThomas Gleixner .pushsection .discard.instr_begin 783931b9414SThomas Gleixner .long 1b - . 
#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

/*
 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
 * will see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp	/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
841905a36a2SIngo Molnar */ 842bc7b11c0SJiri SlabySYM_CODE_START(xen_failsafe_callback) 8438c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 844905a36a2SIngo Molnar movl %ds, %ecx 845905a36a2SIngo Molnar cmpw %cx, 0x10(%rsp) 846905a36a2SIngo Molnar jne 1f 847905a36a2SIngo Molnar movl %es, %ecx 848905a36a2SIngo Molnar cmpw %cx, 0x18(%rsp) 849905a36a2SIngo Molnar jne 1f 850905a36a2SIngo Molnar movl %fs, %ecx 851905a36a2SIngo Molnar cmpw %cx, 0x20(%rsp) 852905a36a2SIngo Molnar jne 1f 853905a36a2SIngo Molnar movl %gs, %ecx 854905a36a2SIngo Molnar cmpw %cx, 0x28(%rsp) 855905a36a2SIngo Molnar jne 1f 856905a36a2SIngo Molnar /* All segments match their saved values => Category 2 (Bad IRET). */ 857905a36a2SIngo Molnar movq (%rsp), %rcx 858905a36a2SIngo Molnar movq 8(%rsp), %r11 859905a36a2SIngo Molnar addq $0x30, %rsp 860905a36a2SIngo Molnar pushq $0 /* RIP */ 8618c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 862be4c11afSThomas Gleixner jmp asm_exc_general_protection 863905a36a2SIngo Molnar1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 864905a36a2SIngo Molnar movq (%rsp), %rcx 865905a36a2SIngo Molnar movq 8(%rsp), %r11 866905a36a2SIngo Molnar addq $0x30, %rsp 8678c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 868905a36a2SIngo Molnar pushq $-1 /* orig_ax = -1 => not a system call */ 8693f01daecSDominik Brodowski PUSH_AND_CLEAR_REGS 870946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 871e88d9741SThomas Gleixner jmp error_return 872bc7b11c0SJiri SlabySYM_CODE_END(xen_failsafe_callback) 87328c11b0fSJuergen Gross#endif /* CONFIG_XEN_PV */ 874905a36a2SIngo Molnar 875905a36a2SIngo Molnar/* 876c82965f9SChang S. Bae * Save all registers in pt_regs. Return GSBASE related information 877c82965f9SChang S. Bae * in EBX depending on the availability of the FSGSBASE instructions: 878c82965f9SChang S. Bae * 879c82965f9SChang S. Bae * FSGSBASE R/EBX 880c82965f9SChang S. Bae * N 0 -> SWAPGS on exit 881c82965f9SChang S. Bae * 1 -> no SWAPGS on exit 882c82965f9SChang S. Bae * 883c82965f9SChang S. Bae * Y GSBASE value at entry, must be restored in paranoid_exit 884905a36a2SIngo Molnar */ 885ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_entry) 8868c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 887905a36a2SIngo Molnar cld 8889e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1 8899e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8 8908a09317bSDave Hansen 89116561f27SDave Hansen /* 89216561f27SDave Hansen * Always stash CR3 in %r14. This value will be restored, 893ae852495SAndy Lutomirski * verbatim, at exit. Needed if paranoid_entry interrupted 894ae852495SAndy Lutomirski * another entry that already switched to the user CR3 value 895ae852495SAndy Lutomirski * but has not yet returned to userspace. 89616561f27SDave Hansen * 89716561f27SDave Hansen * This is also why CS (stashed in the "iret frame" by the 89816561f27SDave Hansen * hardware at entry) can not be used: this may be a return 899ae852495SAndy Lutomirski * to kernel code, but with a user CR3 value. 90096b23714SChang S. Bae * 90196b23714SChang S. Bae * Switching CR3 does not depend on kernel GSBASE so it can 90296b23714SChang S. Bae * be done before switching to the kernel GSBASE. This is 90396b23714SChang S. Bae * required for FSGSBASE because the kernel GSBASE has to 90496b23714SChang S. Bae * be retrieved from a kernel internal table. 90516561f27SDave Hansen */ 9068a09317bSDave Hansen SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 9078a09317bSDave Hansen 90818ec54fdSJosh Poimboeuf /* 909c82965f9SChang S. 
	 * Handling GSBASE depends on the availability of FSGSBASE.
	 *
	 * Without FSGSBASE the kernel enforces that negative GSBASE
	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
	 * can be made about the GSBASE value when entering from user
	 * space.
	 */
	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE

	/*
	 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPU's kernel GSBASE. The stored value
	 * has to be restored in paranoid_exit unconditionally.
	 *
	 * The unconditional write to GS base below ensures that no subsequent
	 * loads based on a mispredicted GS base can happen, therefore no LFENCE
	 * is needed here.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
	ret

.Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */
	movl	$1, %ebx
	/*
	 * The kernel-enforced convention is a negative GSBASE indicates
	 * a kernel value. No SWAPGS needed on entry and exit.
	 */
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	jns	.Lparanoid_entry_swapgs
	ret

.Lparanoid_entry_swapgs:
	swapgs

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case.  So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	/* EBX = 0 -> SWAPGS required on exit */
	xorl	%ebx, %ebx
	ret
SYM_CODE_END(paranoid_entry)
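/*
 * Worked example of the RDMSR sign check above (hedged, with made-up
 * but representative values): RDMSR returns MSR_GS_BASE in EDX:EAX.
 * A kernel GSBASE such as 0xffff888040000000 has EDX = 0xffff8880,
 * sign bit set, so "jns" falls through to the no-SWAPGS return.  A
 * user GSBASE such as 0x00007f1234000000 has EDX = 0x00007f12, sign
 * clear, so the SWAPGS path is taken.
 */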
/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * R/EBX contains the GSBASE related information depending on the
 * availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
 */
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.
	 *
	 * NB to anyone trying to optimize this code: this code does
	 * not execute at all for exceptions from user mode. Those
	 * exceptions go through error_exit instead.
	 */
	RESTORE_CR3	scratch_reg=%rax save_reg=%r14

	/* Handle the three GSBASE cases */
	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE

	/* With FSGSBASE enabled, unconditionally restore GSBASE */
	wrgsbase	%rbx
	jmp	restore_regs_and_return_to_kernel

.Lparanoid_exit_checkgs:
	/* On non-FSGSBASE systems, conditionally do SWAPGS */
	testl	%ebx, %ebx
	jnz	restore_regs_and_return_to_kernel

	/* We are returning to a context with user GSBASE */
	swapgs
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12
	ret

.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	jmp .Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3. Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)
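/*
 * Hedged sketch of the bad-IRET flow handled above: the iretq at
 * native_irq_return_iret faults (say #GP, because userspace installed
 * a bogus SS), the fault re-enters the kernel, and error_entry sees
 * RIP == native_irq_return_iret.  fixup_bad_iret() then copies the
 * faulting frame to the thread stack so that, to the rest of the
 * kernel, it looks like the fault came straight from user mode.
 */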
1083539f5113SAndy Lutomirski	 */
1084905a36a2SIngo Molnar	mov	%rsp, %rdi
1085905a36a2SIngo Molnar	call	fixup_bad_iret
1086905a36a2SIngo Molnar	mov	%rax, %rsp
1087cb6f64edSAndy Lutomirski	jmp	.Lerror_entry_from_usermode_after_swapgs
1088ef1e0315SJiri SlabySYM_CODE_END(error_entry)
1089905a36a2SIngo Molnar
1090424c7d0aSThomas GleixnerSYM_CODE_START_LOCAL(error_return)
1091424c7d0aSThomas Gleixner	UNWIND_HINT_REGS
1092424c7d0aSThomas Gleixner	DEBUG_ENTRY_ASSERT_IRQS_OFF
1093424c7d0aSThomas Gleixner	testb	$3, CS(%rsp)
1094424c7d0aSThomas Gleixner	jz	restore_regs_and_return_to_kernel
1095424c7d0aSThomas Gleixner	jmp	swapgs_restore_regs_and_return_to_usermode
1096424c7d0aSThomas GleixnerSYM_CODE_END(error_return)
1097424c7d0aSThomas Gleixner
1098929bacecSAndy Lutomirski/*
1099929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all,
1100929bacecSAndy Lutomirski * so we can use real assembly here.
11018a09317bSDave Hansen *
11028a09317bSDave Hansen * Registers:
11038a09317bSDave Hansen *	%r14: Used to save/restore the CR3 of the interrupted context
11048a09317bSDave Hansen *	      when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1105929bacecSAndy Lutomirski */
11066271fef0SThomas GleixnerSYM_CODE_START(asm_exc_nmi)
11078c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1108929bacecSAndy Lutomirski
1109fc57a7c6SAndy Lutomirski	/*
1110905a36a2SIngo Molnar	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1111905a36a2SIngo Molnar	 * the iretq it performs will take us out of NMI context.
1112905a36a2SIngo Molnar	 * This means that we can have nested NMIs where the next
1113905a36a2SIngo Molnar	 * NMI is using the top of the stack of the previous NMI. We
1114905a36a2SIngo Molnar	 * can't let it execute because the nested NMI will corrupt the
1115905a36a2SIngo Molnar	 * stack of the previous NMI. NMI handlers are not re-entrant
1116905a36a2SIngo Molnar	 * anyway.
1117905a36a2SIngo Molnar	 *
1118905a36a2SIngo Molnar	 * To handle this case we do the following:
1119905a36a2SIngo Molnar	 *  Check a special location on the stack that contains
1120905a36a2SIngo Molnar	 *  a variable that is set when NMIs are executing.
1121905a36a2SIngo Molnar	 *  The interrupted task's stack is also checked to see if it
1122905a36a2SIngo Molnar	 *  is an NMI stack.
1123905a36a2SIngo Molnar	 *  If the variable is not set and the stack is not the NMI
1124905a36a2SIngo Molnar	 *  stack then:
1125905a36a2SIngo Molnar	 *    o Set the special variable on the stack
11260b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "outermost" location on the
11270b22930eSAndy Lutomirski	 *      stack
11280b22930eSAndy Lutomirski	 *    o Copy the interrupt frame into an "iret" location on the stack
1129905a36a2SIngo Molnar	 *    o Continue processing the NMI
1130905a36a2SIngo Molnar	 *  If the variable is set or the previous stack is the NMI stack:
11310b22930eSAndy Lutomirski	 *    o Modify the "iret" location to jump to the repeat_nmi
1132905a36a2SIngo Molnar	 *    o Return back to the first NMI
1133905a36a2SIngo Molnar	 *
1134905a36a2SIngo Molnar	 * Now on exit of the first NMI, we first clear the stack variable.
1135905a36a2SIngo Molnar	 * The NMI stack will tell any nested NMIs at that point that it is
1136905a36a2SIngo Molnar	 * nested. Then we pop the stack normally with iret, and if there was
1137905a36a2SIngo Molnar	 * a nested NMI that updated the copied interrupt stack frame, a
1138905a36a2SIngo Molnar	 * jump will be made to the repeat_nmi code that will handle the second
1139905a36a2SIngo Molnar	 * NMI.
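	 *
	 * As a rough illustrative sketch only (the names here are
	 * descriptive, not real symbols), the nesting logic amounts to:
	 *
	 *	if (!nmi_executing && !on_nmi_stack(prev_rsp)) {
	 *		nmi_executing   = 1;
	 *		outermost_frame = hw_frame;		// final return target
	 *		iret_frame      = outermost_frame;	// frame IRET consumes
	 *		handle_nmi();
	 *	} else {
	 *		iret_frame.rip = &repeat_nmi;		// force another pass
	 *	}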
11409b6e6a83SAndy Lutomirski	 *
11419b6e6a83SAndy Lutomirski	 * However, espfix prevents us from directly returning to userspace
11429b6e6a83SAndy Lutomirski	 * with a single IRET instruction. Similarly, IRET to user mode
11439b6e6a83SAndy Lutomirski	 * can fault. We therefore handle NMIs from user space like
11449b6e6a83SAndy Lutomirski	 * other IST entries.
1145905a36a2SIngo Molnar	 */
1146905a36a2SIngo Molnar
1147e93c1730SAndy Lutomirski	ASM_CLAC
1148e93c1730SAndy Lutomirski
1149905a36a2SIngo Molnar	/* Use %rdx as our temp variable throughout */
1150905a36a2SIngo Molnar	pushq	%rdx
1151905a36a2SIngo Molnar
11529b6e6a83SAndy Lutomirski	testb	$3, CS-RIP+8(%rsp)
11539b6e6a83SAndy Lutomirski	jz	.Lnmi_from_kernel
1154905a36a2SIngo Molnar
1155905a36a2SIngo Molnar	/*
11569b6e6a83SAndy Lutomirski	 * NMI from user mode. We need to run on the thread stack, but we
11579b6e6a83SAndy Lutomirski	 * can't go through the normal entry paths: NMIs are masked, and
11589b6e6a83SAndy Lutomirski	 * we don't want to enable interrupts, because then we'll end
11599b6e6a83SAndy Lutomirski	 * up in an awkward situation in which IRQs are on but NMIs
11609b6e6a83SAndy Lutomirski	 * are off.
116183c133cfSAndy Lutomirski	 *
116283c133cfSAndy Lutomirski	 * We also must not push anything to the stack before switching
116383c133cfSAndy Lutomirski	 * stacks lest we corrupt the "NMI executing" variable.
11649b6e6a83SAndy Lutomirski	 */
11659b6e6a83SAndy Lutomirski
1166929bacecSAndy Lutomirski	swapgs
11679b6e6a83SAndy Lutomirski	cld
116818ec54fdSJosh Poimboeuf	FENCE_SWAPGS_USER_ENTRY
11698a09317bSDave Hansen	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
11709b6e6a83SAndy Lutomirski	movq	%rsp, %rdx
11719b6e6a83SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
11728c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS base=%rdx offset=8
11739b6e6a83SAndy Lutomirski	pushq	5*8(%rdx)	/* pt_regs->ss */
11749b6e6a83SAndy Lutomirski	pushq	4*8(%rdx)	/* pt_regs->rsp */
11759b6e6a83SAndy Lutomirski	pushq	3*8(%rdx)	/* pt_regs->flags */
11769b6e6a83SAndy Lutomirski	pushq	2*8(%rdx)	/* pt_regs->cs */
11779b6e6a83SAndy Lutomirski	pushq	1*8(%rdx)	/* pt_regs->rip */
11788c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
11799b6e6a83SAndy Lutomirski	pushq	$-1		/* pt_regs->orig_ax */
118030907fd1SDominik Brodowski	PUSH_AND_CLEAR_REGS rdx=(%rdx)
1181946c1911SJosh Poimboeuf	ENCODE_FRAME_POINTER
11829b6e6a83SAndy Lutomirski
11839b6e6a83SAndy Lutomirski	/*
11849b6e6a83SAndy Lutomirski	 * At this point we no longer need to worry about stack damage
11859b6e6a83SAndy Lutomirski	 * due to nesting -- we're on the normal thread stack and we're
11869b6e6a83SAndy Lutomirski	 * done with the NMI stack.
11879b6e6a83SAndy Lutomirski	 */
11889b6e6a83SAndy Lutomirski
11899b6e6a83SAndy Lutomirski	movq	%rsp, %rdi
11909b6e6a83SAndy Lutomirski	movq	$-1, %rsi
11916271fef0SThomas Gleixner	call	exc_nmi
11929b6e6a83SAndy Lutomirski
11939b6e6a83SAndy Lutomirski	/*
11949b6e6a83SAndy Lutomirski	 * Return back to user mode. We must *not* do the normal exit
1195946c1911SJosh Poimboeuf	 * work, because we don't want to enable interrupts.
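	 *
	 * (swapgs_restore_regs_and_return_to_usermode pops the saved
	 * registers, switches back to user CR3 and GSBASE, and IRETs, so
	 * no interrupt-enabling exit work runs on this path.)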
11969b6e6a83SAndy Lutomirski	 */
11978a055d7fSAndy Lutomirski	jmp	swapgs_restore_regs_and_return_to_usermode
11989b6e6a83SAndy Lutomirski
11999b6e6a83SAndy Lutomirski.Lnmi_from_kernel:
12009b6e6a83SAndy Lutomirski	/*
12010b22930eSAndy Lutomirski	 * Here's what our stack frame will look like:
12020b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12030b22930eSAndy Lutomirski	 * | original SS                                             |
12040b22930eSAndy Lutomirski	 * | original Return RSP                                     |
12050b22930eSAndy Lutomirski	 * | original RFLAGS                                         |
12060b22930eSAndy Lutomirski	 * | original CS                                             |
12070b22930eSAndy Lutomirski	 * | original RIP                                            |
12080b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12090b22930eSAndy Lutomirski	 * | temp storage for rdx                                    |
12100b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12110b22930eSAndy Lutomirski	 * | "NMI executing" variable                                |
12120b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12130b22930eSAndy Lutomirski	 * | iret SS          } Copied from "outermost" frame        |
12140b22930eSAndy Lutomirski	 * | iret Return RSP  } on each loop iteration; overwritten  |
12150b22930eSAndy Lutomirski	 * | iret RFLAGS      } by a nested NMI to force another     |
12160b22930eSAndy Lutomirski	 * | iret CS          } iteration if needed.                 |
12170b22930eSAndy Lutomirski	 * | iret RIP         }                                      |
12180b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12190b22930eSAndy Lutomirski	 * | outermost SS          } initialized in first_nmi;       |
12200b22930eSAndy Lutomirski	 * | outermost Return RSP  } will not be changed before      |
12210b22930eSAndy Lutomirski	 * | outermost RFLAGS      } NMI processing is done.         |
12220b22930eSAndy Lutomirski	 * | outermost CS          } Copied to "iret" frame on each  |
12230b22930eSAndy Lutomirski	 * | outermost RIP         } iteration.                      |
12240b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12250b22930eSAndy Lutomirski	 * | pt_regs                                                 |
12260b22930eSAndy Lutomirski	 * +---------------------------------------------------------+
12270b22930eSAndy Lutomirski	 *
12280b22930eSAndy Lutomirski	 * The "original" frame is used by hardware. Before re-enabling
12290b22930eSAndy Lutomirski	 * NMIs, we need to be done with it, and we need to leave enough
12300b22930eSAndy Lutomirski	 * space for the asm code here.
12310b22930eSAndy Lutomirski	 *
12320b22930eSAndy Lutomirski	 * We return by executing IRET while RSP points to the "iret" frame.
12330b22930eSAndy Lutomirski	 * That will either return for real or it will loop back into NMI
12340b22930eSAndy Lutomirski	 * processing.
12350b22930eSAndy Lutomirski	 *
12360b22930eSAndy Lutomirski	 * The "outermost" frame is copied to the "iret" frame on each
12370b22930eSAndy Lutomirski	 * iteration of the loop, so each iteration starts with the "iret"
12380b22930eSAndy Lutomirski	 * frame pointing to the final return target.
12390b22930eSAndy Lutomirski	 */
12400b22930eSAndy Lutomirski
12410b22930eSAndy Lutomirski	/*
12420b22930eSAndy Lutomirski	 * Determine whether we're a nested NMI.
12430b22930eSAndy Lutomirski	 *
1244a27507caSAndy Lutomirski	 * If we interrupted kernel code between repeat_nmi and
1245a27507caSAndy Lutomirski	 * end_repeat_nmi, then we are a nested NMI. We must not
1246a27507caSAndy Lutomirski	 * modify the "iret" frame because it's being written by
1247a27507caSAndy Lutomirski	 * the outer NMI. That's okay; the outer NMI handler is
12486271fef0SThomas Gleixner	 * about to call exc_nmi() anyway, so we can just
1249a27507caSAndy Lutomirski	 * resume the outer NMI.
1250a27507caSAndy Lutomirski	 */
1251a27507caSAndy Lutomirski
1252a27507caSAndy Lutomirski	movq	$repeat_nmi, %rdx
1253a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1254a27507caSAndy Lutomirski	ja	1f
1255a27507caSAndy Lutomirski	movq	$end_repeat_nmi, %rdx
1256a27507caSAndy Lutomirski	cmpq	8(%rsp), %rdx
1257a27507caSAndy Lutomirski	ja	nested_nmi_out
1258a27507caSAndy Lutomirski1:
1259a27507caSAndy Lutomirski
1260a27507caSAndy Lutomirski	/*
1261a27507caSAndy Lutomirski	 * Now check "NMI executing". If it's set, then we're nested.
12620b22930eSAndy Lutomirski	 * This will not detect if we interrupted an outer NMI just
12630b22930eSAndy Lutomirski	 * before IRET.
1264905a36a2SIngo Molnar	 */
1265905a36a2SIngo Molnar	cmpl	$1, -8(%rsp)
1266905a36a2SIngo Molnar	je	nested_nmi
1267905a36a2SIngo Molnar
1268905a36a2SIngo Molnar	/*
12690b22930eSAndy Lutomirski	 * Now test if the previous stack was an NMI stack. This covers
12700b22930eSAndy Lutomirski	 * the case where we interrupt an outer NMI after it clears
1271810bc075SAndy Lutomirski	 * "NMI executing" but before IRET. We need to be careful, though:
1272810bc075SAndy Lutomirski	 * there is one case in which RSP could point to the NMI stack
1273810bc075SAndy Lutomirski	 * despite there being no NMI active: naughty userspace controls
1274810bc075SAndy Lutomirski	 * RSP at the very beginning of the SYSCALL targets. We can
1275810bc075SAndy Lutomirski	 * pull a fast one on naughty userspace, though: we program
1276810bc075SAndy Lutomirski	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1277810bc075SAndy Lutomirski	 * if it controls the kernel's RSP. We set DF before we clear
1278810bc075SAndy Lutomirski	 * "NMI executing".
1279905a36a2SIngo Molnar	 */
1280905a36a2SIngo Molnar	lea	6*8(%rsp), %rdx
1281905a36a2SIngo Molnar	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1282905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1283905a36a2SIngo Molnar	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1284905a36a2SIngo Molnar	ja	first_nmi
12854d732138SIngo Molnar
1286905a36a2SIngo Molnar	subq	$EXCEPTION_STKSZ, %rdx
1287905a36a2SIngo Molnar	cmpq	%rdx, 4*8(%rsp)
1288905a36a2SIngo Molnar	/* If it is below the NMI stack, it is a normal NMI */
1289905a36a2SIngo Molnar	jb	first_nmi
1290810bc075SAndy Lutomirski
1291810bc075SAndy Lutomirski	/* Ah, it is within the NMI stack. */
1292810bc075SAndy Lutomirski
1293810bc075SAndy Lutomirski	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1294810bc075SAndy Lutomirski	jz	first_nmi	/* RSP was user controlled. */
1295810bc075SAndy Lutomirski
1296810bc075SAndy Lutomirski	/* This is a nested NMI. */
1297905a36a2SIngo Molnar
1298905a36a2SIngo Molnarnested_nmi:
1299905a36a2SIngo Molnar	/*
13000b22930eSAndy Lutomirski	 * Modify the "iret" frame to point to repeat_nmi, forcing another
13010b22930eSAndy Lutomirski	 * iteration of NMI handling.
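	 *
	 * Schematically, the pushes below build (an illustrative sketch,
	 * not real code):
	 *
	 *	iret.ss     = __KERNEL_DS;
	 *	iret.rsp    = rdx;		// points back into this stack
	 *	iret.rflags = rflags;
	 *	iret.cs     = __KERNEL_CS;
	 *	iret.rip    = &repeat_nmi;	// loop instead of returning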
1302905a36a2SIngo Molnar	 */
130323a781e9SAndy Lutomirski	subq	$8, %rsp
1304905a36a2SIngo Molnar	leaq	-10*8(%rsp), %rdx
1305905a36a2SIngo Molnar	pushq	$__KERNEL_DS
1306905a36a2SIngo Molnar	pushq	%rdx
1307905a36a2SIngo Molnar	pushfq
1308905a36a2SIngo Molnar	pushq	$__KERNEL_CS
1309905a36a2SIngo Molnar	pushq	$repeat_nmi
1310905a36a2SIngo Molnar
1311905a36a2SIngo Molnar	/* Put stack back */
1312905a36a2SIngo Molnar	addq	$(6*8), %rsp
1313905a36a2SIngo Molnar
1314905a36a2SIngo Molnarnested_nmi_out:
1315905a36a2SIngo Molnar	popq	%rdx
1316905a36a2SIngo Molnar
13170b22930eSAndy Lutomirski	/* We are returning to kernel mode, so this cannot result in a fault. */
1318929bacecSAndy Lutomirski	iretq
1319905a36a2SIngo Molnar
1320905a36a2SIngo Molnarfirst_nmi:
13210b22930eSAndy Lutomirski	/* Restore rdx. */
1322905a36a2SIngo Molnar	movq	(%rsp), %rdx
1323905a36a2SIngo Molnar
132436f1a77bSAndy Lutomirski	/* Make room for "NMI executing". */
132536f1a77bSAndy Lutomirski	pushq	$0
1326905a36a2SIngo Molnar
13270b22930eSAndy Lutomirski	/* Leave room for the "iret" frame */
1328905a36a2SIngo Molnar	subq	$(5*8), %rsp
1329905a36a2SIngo Molnar
13300b22930eSAndy Lutomirski	/* Copy the "original" frame to the "outermost" frame */
1331905a36a2SIngo Molnar	.rept 5
1332905a36a2SIngo Molnar	pushq	11*8(%rsp)
1333905a36a2SIngo Molnar	.endr
13348c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1335905a36a2SIngo Molnar
1336905a36a2SIngo Molnar	/* Everything up to here is safe from nested NMIs */
1337905a36a2SIngo Molnar
1338a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY
1339a97439aaSAndy Lutomirski	/*
1340a97439aaSAndy Lutomirski	 * For ease of testing, unmask NMIs right away. Disabled by
1341a97439aaSAndy Lutomirski	 * default because IRET is very expensive.
1342a97439aaSAndy Lutomirski	 */
1343a97439aaSAndy Lutomirski	pushq	$0		/* SS */
1344a97439aaSAndy Lutomirski	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1345a97439aaSAndy Lutomirski	addq	$8, (%rsp)	/* Fix up RSP */
1346a97439aaSAndy Lutomirski	pushfq			/* RFLAGS */
1347a97439aaSAndy Lutomirski	pushq	$__KERNEL_CS	/* CS */
1348a97439aaSAndy Lutomirski	pushq	$1f		/* RIP */
1349929bacecSAndy Lutomirski	iretq			/* continues at repeat_nmi below */
13508c1f7558SJosh Poimboeuf	UNWIND_HINT_IRET_REGS
1351a97439aaSAndy Lutomirski1:
1352a97439aaSAndy Lutomirski#endif
1353a97439aaSAndy Lutomirski
13540b22930eSAndy Lutomirskirepeat_nmi:
1355905a36a2SIngo Molnar	/*
1356905a36a2SIngo Molnar	 * If there was a nested NMI, the first NMI's iret will return
1357905a36a2SIngo Molnar	 * here. But NMIs are still enabled and we can take another
1358905a36a2SIngo Molnar	 * nested NMI. The nested NMI checks the interrupted RIP to see
1359905a36a2SIngo Molnar	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1360905a36a2SIngo Molnar	 * it will just return, as we are about to repeat an NMI anyway.
1361905a36a2SIngo Molnar	 * This makes it safe to copy to the stack frame that a nested
1362905a36a2SIngo Molnar	 * NMI will update.
13630b22930eSAndy Lutomirski	 *
13640b22930eSAndy Lutomirski	 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
13650b22930eSAndy Lutomirski	 * we're repeating an NMI, gsbase has the same value that it had on
13660b22930eSAndy Lutomirski	 * the first iteration. paranoid_entry will load the kernel
13676271fef0SThomas Gleixner	 * gsbase if needed before we call exc_nmi(). "NMI executing"
136836f1a77bSAndy Lutomirski	 * is zero.
1369905a36a2SIngo Molnar	 */
137036f1a77bSAndy Lutomirski	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
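	/*
	 * (For orientation: %rsp points at "outermost RIP" here, so
	 * 10*8(%rsp) skips the five-word "outermost" and five-word "iret"
	 * frames and lands on the "NMI executing" slot.)
	 */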
1371905a36a2SIngo Molnar
13720b22930eSAndy Lutomirski	/*
13730b22930eSAndy Lutomirski	 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
13740b22930eSAndy Lutomirski	 * here must not modify the "iret" frame while we're writing to
13750b22930eSAndy Lutomirski	 * it or it will end up containing garbage.
13760b22930eSAndy Lutomirski	 */
1377905a36a2SIngo Molnar	addq	$(10*8), %rsp
1378905a36a2SIngo Molnar	.rept 5
1379905a36a2SIngo Molnar	pushq	-6*8(%rsp)
1380905a36a2SIngo Molnar	.endr
1381905a36a2SIngo Molnar	subq	$(5*8), %rsp
1382905a36a2SIngo Molnarend_repeat_nmi:
1383905a36a2SIngo Molnar
1384905a36a2SIngo Molnar	/*
13850b22930eSAndy Lutomirski	 * Everything below this point can be preempted by a nested NMI.
13860b22930eSAndy Lutomirski	 * If this happens, then the inner NMI will change the "iret"
13870b22930eSAndy Lutomirski	 * frame to point back to repeat_nmi.
1388905a36a2SIngo Molnar	 */
1389905a36a2SIngo Molnar	pushq	$-1		/* ORIG_RAX: no syscall to restart */
1390905a36a2SIngo Molnar
1391905a36a2SIngo Molnar	/*
1392905a36a2SIngo Molnar	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1393905a36a2SIngo Molnar	 * as we should not be calling schedule in NMI context, even with
1394905a36a2SIngo Molnar	 * normal interrupts enabled. An NMI should not be setting
1395905a36a2SIngo Molnar	 * NEED_RESCHED or anything that normal interrupts and
1396905a36a2SIngo Molnar	 * exceptions might do.
1397905a36a2SIngo Molnar	 */
1398905a36a2SIngo Molnar	call	paranoid_entry
13998c1f7558SJosh Poimboeuf	UNWIND_HINT_REGS
1400905a36a2SIngo Molnar
1401905a36a2SIngo Molnar	movq	%rsp, %rdi
1402905a36a2SIngo Molnar	movq	$-1, %rsi
14036271fef0SThomas Gleixner	call	exc_nmi
1404905a36a2SIngo Molnar
140516561f27SDave Hansen	/* Always restore stashed CR3 value (see paranoid_entry) */
140621e94459SPeter Zijlstra	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
14078a09317bSDave Hansen
1408c82965f9SChang S. Bae	/*
1409c82965f9SChang S. Bae	 * The above invocation of paranoid_entry stored the GSBASE
1410c82965f9SChang S. Bae	 * related information in R/EBX depending on the availability
1411c82965f9SChang S. Bae	 * of FSGSBASE.
1412c82965f9SChang S. Bae	 *
1413c82965f9SChang S. Bae	 * If FSGSBASE is enabled, restore the saved GSBASE value
1414c82965f9SChang S. Bae	 * unconditionally, otherwise take the conditional SWAPGS path.
1415c82965f9SChang S. Bae	 */
1416c82965f9SChang S. Bae	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1417c82965f9SChang S. Bae
1418c82965f9SChang S. Bae	wrgsbase	%rbx
1419c82965f9SChang S. Bae	jmp	nmi_restore
1420c82965f9SChang S. Bae
1421c82965f9SChang S. Baenmi_no_fsgsbase:
1422c82965f9SChang S. Bae	/* EBX == 0 -> invoke SWAPGS */
1423c82965f9SChang S. Bae	testl	%ebx, %ebx
1424905a36a2SIngo Molnar	jnz	nmi_restore
1425c82965f9SChang S. Bae
1426905a36a2SIngo Molnarnmi_swapgs:
142753c9d924SJuergen Gross	swapgs
1428c82965f9SChang S. Bae
1429905a36a2SIngo Molnarnmi_restore:
1430502af0d7SDominik Brodowski	POP_REGS
14310b22930eSAndy Lutomirski
1432471ee483SAndy Lutomirski	/*
1433471ee483SAndy Lutomirski	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1434471ee483SAndy Lutomirski	 * frame.
1435471ee483SAndy Lutomirski	 */
1436471ee483SAndy Lutomirski	addq	$6*8, %rsp
1437905a36a2SIngo Molnar
1438810bc075SAndy Lutomirski	/*
1439810bc075SAndy Lutomirski	 * Clear "NMI executing". Set DF first so that we can easily
1440810bc075SAndy Lutomirski	 * distinguish the remaining code between here and IRET from
1441929bacecSAndy Lutomirski	 * the SYSCALL entry and exit paths.
1442929bacecSAndy Lutomirski	 *
1443929bacecSAndy Lutomirski	 * We arguably should just inspect RIP instead, but I (Andy) wrote
1444929bacecSAndy Lutomirski	 * this code when I had the misapprehension that Xen PV supported
1445929bacecSAndy Lutomirski	 * NMIs, and Xen PV would break that approach.
1446810bc075SAndy Lutomirski	 */
1447810bc075SAndy Lutomirski	std
1448810bc075SAndy Lutomirski	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
14490b22930eSAndy Lutomirski
14500b22930eSAndy Lutomirski	/*
1451929bacecSAndy Lutomirski	 * iretq reads the "iret" frame and exits the NMI stack in a
1452929bacecSAndy Lutomirski	 * single instruction. We are returning to kernel mode, so this
1453929bacecSAndy Lutomirski	 * cannot result in a fault. Similarly, we don't need to worry
1454929bacecSAndy Lutomirski	 * about espfix64 on the way back to kernel mode.
14550b22930eSAndy Lutomirski	 */
1456929bacecSAndy Lutomirski	iretq
14576271fef0SThomas GleixnerSYM_CODE_END(asm_exc_nmi)
1458905a36a2SIngo Molnar
1459dffb3f9dSAndy Lutomirski#ifndef CONFIG_IA32_EMULATION
1460dffb3f9dSAndy Lutomirski/*
1461dffb3f9dSAndy Lutomirski * This handles SYSCALL from 32-bit code. There is no way to program
1462dffb3f9dSAndy Lutomirski * MSRs to fully disable 32-bit SYSCALL.
1463dffb3f9dSAndy Lutomirski */
1464bc7b11c0SJiri SlabySYM_CODE_START(ignore_sysret)
14658c1f7558SJosh Poimboeuf	UNWIND_HINT_EMPTY
1466905a36a2SIngo Molnar	mov	$-ENOSYS, %eax
1467b2b1d94cSJan Beulich	sysretl
1468bc7b11c0SJiri SlabySYM_CODE_END(ignore_sysret)
1469dffb3f9dSAndy Lutomirski#endif
14702deb4be2SAndy Lutomirski
1471b9f6976bSThomas Gleixner.pushsection .text, "ax"
1472bc7b11c0SJiri SlabySYM_CODE_START(rewind_stack_do_exit)
14738c1f7558SJosh Poimboeuf	UNWIND_HINT_FUNC
14742deb4be2SAndy Lutomirski	/* Prevent any naive code from trying to unwind to our caller. */
14752deb4be2SAndy Lutomirski	xorl	%ebp, %ebp
14762deb4be2SAndy Lutomirski
14772deb4be2SAndy Lutomirski	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
14788c1f7558SJosh Poimboeuf	leaq	-PTREGS_SIZE(%rax), %rsp
1479f977df7bSJann Horn	UNWIND_HINT_REGS
14802deb4be2SAndy Lutomirski
14812deb4be2SAndy Lutomirski	call	do_exit
1482bc7b11c0SJiri SlabySYM_CODE_END(rewind_stack_do_exit)
1483b9f6976bSThomas Gleixner.popsection
1484
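/*
 * For readers: rewind_stack_do_exit above is, in rough C terms, an
 * illustrative sketch only (the exit code is already in %rdi when we
 * get here):
 *
 *	rbp = 0;				// stop naive unwinders
 *	rsp = top_of_stack() - PTREGS_SIZE;	// abandon the old stack
 *	do_exit(code);				// never returns
 */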