/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
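 *
 * Purely as an illustration (not part of this file's code; the buffer
 * symbol and length are hypothetical), a direct write(2) issued via
 * SYSCALL from userspace follows the register convention documented
 * below:
 *
 *	movl	$1, %eax		# __NR_write
 *	movl	$1, %edi		# fd = stdout
 *	leaq	msg(%rip), %rsi		# buffer (hypothetical symbol)
 *	movl	$14, %edx		# count
 *	syscall				# rcx/r11 are clobbered by the CPU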
69fda57b22SAndy Lutomirski * 704d732138SIngo Molnar * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, 71905a36a2SIngo Molnar * then loads new ss, cs, and rip from previously programmed MSRs. 72905a36a2SIngo Molnar * rflags gets masked by a value from another MSR (so CLD and CLAC 73905a36a2SIngo Molnar * are not needed). SYSCALL does not save anything on the stack 74905a36a2SIngo Molnar * and does not change rsp. 75905a36a2SIngo Molnar * 76905a36a2SIngo Molnar * Registers on entry: 77905a36a2SIngo Molnar * rax system call number 78905a36a2SIngo Molnar * rcx return address 79905a36a2SIngo Molnar * r11 saved rflags (note: r11 is callee-clobbered register in C ABI) 80905a36a2SIngo Molnar * rdi arg0 81905a36a2SIngo Molnar * rsi arg1 82905a36a2SIngo Molnar * rdx arg2 83905a36a2SIngo Molnar * r10 arg3 (needs to be moved to rcx to conform to C ABI) 84905a36a2SIngo Molnar * r8 arg4 85905a36a2SIngo Molnar * r9 arg5 86905a36a2SIngo Molnar * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) 87905a36a2SIngo Molnar * 88905a36a2SIngo Molnar * Only called from user space. 89905a36a2SIngo Molnar * 90905a36a2SIngo Molnar * When user can change pt_regs->foo always force IRET. That is because 91905a36a2SIngo Molnar * it deals with uncanonical addresses better. SYSRET has trouble 92905a36a2SIngo Molnar * with them due to bugs in both AMD and Intel CPUs. 93905a36a2SIngo Molnar */ 94905a36a2SIngo Molnar 95bc7b11c0SJiri SlabySYM_CODE_START(entry_SYSCALL_64) 968c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 97905a36a2SIngo Molnar 988a9949bcSAndy Lutomirski swapgs 99bf904d27SAndy Lutomirski /* tss.sp2 is scratch space. */ 10098f05b51SAndy Lutomirski movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) 101bf904d27SAndy Lutomirski SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp 102905a36a2SIngo Molnar movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 103905a36a2SIngo Molnar 104905a36a2SIngo Molnar /* Construct struct pt_regs on stack */ 105905a36a2SIngo Molnar pushq $__USER_DS /* pt_regs->ss */ 10698f05b51SAndy Lutomirski pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */ 107905a36a2SIngo Molnar pushq %r11 /* pt_regs->flags */ 108905a36a2SIngo Molnar pushq $__USER_CS /* pt_regs->cs */ 109905a36a2SIngo Molnar pushq %rcx /* pt_regs->ip */ 11026ba4e57SJiri SlabySYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) 111905a36a2SIngo Molnar pushq %rax /* pt_regs->orig_ax */ 11230907fd1SDominik Brodowski 11330907fd1SDominik Brodowski PUSH_AND_CLEAR_REGS rax=$-ENOSYS 114905a36a2SIngo Molnar 1151e423bffSAndy Lutomirski /* IRQs are off. */ 116dfe64506SLinus Torvalds movq %rax, %rdi 117dfe64506SLinus Torvalds movq %rsp, %rsi 1181e423bffSAndy Lutomirski call do_syscall_64 /* returns with IRQs disabled */ 1191e423bffSAndy Lutomirski 120905a36a2SIngo Molnar /* 121905a36a2SIngo Molnar * Try to use SYSRET instead of IRET if we're returning to 1228a055d7fSAndy Lutomirski * a completely clean 64-bit userspace context. If we're not, 1238a055d7fSAndy Lutomirski * go to the slow exit path. 124905a36a2SIngo Molnar */ 125905a36a2SIngo Molnar movq RCX(%rsp), %rcx 126905a36a2SIngo Molnar movq RIP(%rsp), %r11 1278a055d7fSAndy Lutomirski 1288a055d7fSAndy Lutomirski cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ 1298a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 130905a36a2SIngo Molnar 131905a36a2SIngo Molnar /* 132905a36a2SIngo Molnar * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP 133905a36a2SIngo Molnar * in kernel space. 
This essentially lets the user take over 134905a36a2SIngo Molnar * the kernel, since userspace controls RSP. 135905a36a2SIngo Molnar * 136905a36a2SIngo Molnar * If width of "canonical tail" ever becomes variable, this will need 137905a36a2SIngo Molnar * to be updated to remain correct on both old and new CPUs. 138361b4b58SKirill A. Shutemov * 139cbe0317bSKirill A. Shutemov * Change top bits to match most significant bit (47th or 56th bit 140cbe0317bSKirill A. Shutemov * depending on paging mode) in the address. 141905a36a2SIngo Molnar */ 14209e61a77SKirill A. Shutemov#ifdef CONFIG_X86_5LEVEL 14339b95522SKirill A. Shutemov ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \ 14439b95522SKirill A. Shutemov "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57 14509e61a77SKirill A. Shutemov#else 146905a36a2SIngo Molnar shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 147905a36a2SIngo Molnar sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 14809e61a77SKirill A. Shutemov#endif 1494d732138SIngo Molnar 150905a36a2SIngo Molnar /* If this changed %rcx, it was not canonical */ 151905a36a2SIngo Molnar cmpq %rcx, %r11 1528a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 153905a36a2SIngo Molnar 154905a36a2SIngo Molnar cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ 1558a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 156905a36a2SIngo Molnar 157905a36a2SIngo Molnar movq R11(%rsp), %r11 158905a36a2SIngo Molnar cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ 1598a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 160905a36a2SIngo Molnar 161905a36a2SIngo Molnar /* 1623e035305SBorislav Petkov * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot 1633e035305SBorislav Petkov * restore RF properly. If the slowpath sets it for whatever reason, we 1643e035305SBorislav Petkov * need to restore it correctly. 1653e035305SBorislav Petkov * 1663e035305SBorislav Petkov * SYSRET can restore TF, but unlike IRET, restoring TF results in a 1673e035305SBorislav Petkov * trap from userspace immediately after SYSRET. This would cause an 1683e035305SBorislav Petkov * infinite loop whenever #DB happens with register state that satisfies 1693e035305SBorislav Petkov * the opportunistic SYSRET conditions. For example, single-stepping 1703e035305SBorislav Petkov * this user code: 171905a36a2SIngo Molnar * 172905a36a2SIngo Molnar * movq $stuck_here, %rcx 173905a36a2SIngo Molnar * pushfq 174905a36a2SIngo Molnar * popq %r11 175905a36a2SIngo Molnar * stuck_here: 176905a36a2SIngo Molnar * 177905a36a2SIngo Molnar * would never get past 'stuck_here'. 178905a36a2SIngo Molnar */ 179905a36a2SIngo Molnar testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 1808a055d7fSAndy Lutomirski jnz swapgs_restore_regs_and_return_to_usermode 181905a36a2SIngo Molnar 182905a36a2SIngo Molnar /* nothing to check for RSP */ 183905a36a2SIngo Molnar 184905a36a2SIngo Molnar cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ 1858a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 186905a36a2SIngo Molnar 187905a36a2SIngo Molnar /* 188905a36a2SIngo Molnar * We win! This label is here just for ease of understanding 189905a36a2SIngo Molnar * perf profiles. Nothing jumps here. 
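 *
 * For reference, a worked example of the canonical-RIP check above,
 * assuming 4-level paging (__VIRTUAL_MASK_SHIFT == 47, i.e. a 16-bit
 * shift): a non-canonical saved RCX such as 0x0000800000000000 becomes
 * 0xffff800000000000 after "shl $16; sar $16", which no longer matches
 * the untouched RIP copy in %r11, so the fast path falls back to IRET.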
190905a36a2SIngo Molnar */ 191905a36a2SIngo Molnarsyscall_return_via_sysret: 192905a36a2SIngo Molnar /* rcx and r11 are already restored (see code above) */ 193502af0d7SDominik Brodowski POP_REGS pop_rdi=0 skip_r11rcx=1 1943e3b9293SAndy Lutomirski 1953e3b9293SAndy Lutomirski /* 1963e3b9293SAndy Lutomirski * Now all regs are restored except RSP and RDI. 1973e3b9293SAndy Lutomirski * Save old stack pointer and switch to trampoline stack. 1983e3b9293SAndy Lutomirski */ 1993e3b9293SAndy Lutomirski movq %rsp, %rdi 200c482feefSAndy Lutomirski movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 2011fb14363SJosh Poimboeuf UNWIND_HINT_EMPTY 2023e3b9293SAndy Lutomirski 2033e3b9293SAndy Lutomirski pushq RSP-RDI(%rdi) /* RSP */ 2043e3b9293SAndy Lutomirski pushq (%rdi) /* RDI */ 2053e3b9293SAndy Lutomirski 2063e3b9293SAndy Lutomirski /* 2073e3b9293SAndy Lutomirski * We are on the trampoline stack. All regs except RDI are live. 2083e3b9293SAndy Lutomirski * We can do future final exit work right here. 2093e3b9293SAndy Lutomirski */ 210afaef01cSAlexander Popov STACKLEAK_ERASE_NOCLOBBER 211afaef01cSAlexander Popov 2126fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 2133e3b9293SAndy Lutomirski 2144fbb3910SAndy Lutomirski popq %rdi 2153e3b9293SAndy Lutomirski popq %rsp 216905a36a2SIngo Molnar USERGS_SYSRET64 217bc7b11c0SJiri SlabySYM_CODE_END(entry_SYSCALL_64) 218905a36a2SIngo Molnar 219905a36a2SIngo Molnar/* 2200100301bSBrian Gerst * %rdi: prev task 2210100301bSBrian Gerst * %rsi: next task 2220100301bSBrian Gerst */ 223b9f6976bSThomas Gleixner.pushsection .text, "ax" 22496c64806SJosh PoimboeufSYM_FUNC_START(__switch_to_asm) 2250100301bSBrian Gerst /* 2260100301bSBrian Gerst * Save callee-saved registers 2270100301bSBrian Gerst * This must match the order in inactive_task_frame 2280100301bSBrian Gerst */ 2290100301bSBrian Gerst pushq %rbp 2300100301bSBrian Gerst pushq %rbx 2310100301bSBrian Gerst pushq %r12 2320100301bSBrian Gerst pushq %r13 2330100301bSBrian Gerst pushq %r14 2340100301bSBrian Gerst pushq %r15 2350100301bSBrian Gerst 2360100301bSBrian Gerst /* switch stack */ 2370100301bSBrian Gerst movq %rsp, TASK_threadsp(%rdi) 2380100301bSBrian Gerst movq TASK_threadsp(%rsi), %rsp 2390100301bSBrian Gerst 240050e9baaSLinus Torvalds#ifdef CONFIG_STACKPROTECTOR 2410100301bSBrian Gerst movq TASK_stack_canary(%rsi), %rbx 242e6401c13SAndy Lutomirski movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset 2430100301bSBrian Gerst#endif 2440100301bSBrian Gerst 245c995efd5SDavid Woodhouse#ifdef CONFIG_RETPOLINE 246c995efd5SDavid Woodhouse /* 247c995efd5SDavid Woodhouse * When switching from a shallower to a deeper call stack 248c995efd5SDavid Woodhouse * the RSB may either underflow or use entries populated 249c995efd5SDavid Woodhouse * with userspace addresses. On CPUs where those concerns 250c995efd5SDavid Woodhouse * exist, overwrite the RSB with entries which capture 251c995efd5SDavid Woodhouse * speculative execution to prevent attack. 
252c995efd5SDavid Woodhouse */ 253d1c99108SDavid Woodhouse FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW 254c995efd5SDavid Woodhouse#endif 255c995efd5SDavid Woodhouse 2560100301bSBrian Gerst /* restore callee-saved registers */ 2570100301bSBrian Gerst popq %r15 2580100301bSBrian Gerst popq %r14 2590100301bSBrian Gerst popq %r13 2600100301bSBrian Gerst popq %r12 2610100301bSBrian Gerst popq %rbx 2620100301bSBrian Gerst popq %rbp 2630100301bSBrian Gerst 2640100301bSBrian Gerst jmp __switch_to 26596c64806SJosh PoimboeufSYM_FUNC_END(__switch_to_asm) 266b9f6976bSThomas Gleixner.popsection 2670100301bSBrian Gerst 2680100301bSBrian Gerst/* 269905a36a2SIngo Molnar * A newly forked process directly context switches into this address. 270905a36a2SIngo Molnar * 2710100301bSBrian Gerst * rax: prev task we switched from 272616d2483SBrian Gerst * rbx: kernel thread func (NULL for user thread) 273616d2483SBrian Gerst * r12: kernel thread arg 274905a36a2SIngo Molnar */ 275b9f6976bSThomas Gleixner.pushsection .text, "ax" 276bc7b11c0SJiri SlabySYM_CODE_START(ret_from_fork) 2778c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 2780100301bSBrian Gerst movq %rax, %rdi 2794d732138SIngo Molnar call schedule_tail /* rdi: 'prev' task parameter */ 280905a36a2SIngo Molnar 281616d2483SBrian Gerst testq %rbx, %rbx /* from kernel_thread? */ 282616d2483SBrian Gerst jnz 1f /* kernel threads are uncommon */ 283905a36a2SIngo Molnar 284616d2483SBrian Gerst2: 2858c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 286ebd57499SJosh Poimboeuf movq %rsp, %rdi 28724d978b7SAndy Lutomirski call syscall_return_slowpath /* returns with IRQs disabled */ 2888a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode 289616d2483SBrian Gerst 290616d2483SBrian Gerst1: 291616d2483SBrian Gerst /* kernel thread */ 292d31a5802SJosh Poimboeuf UNWIND_HINT_EMPTY 293616d2483SBrian Gerst movq %r12, %rdi 29434fdce69SPeter Zijlstra CALL_NOSPEC rbx 295616d2483SBrian Gerst /* 296616d2483SBrian Gerst * A kernel thread is allowed to return here after successfully 297616d2483SBrian Gerst * calling do_execve(). Exit to userspace to complete the execve() 298616d2483SBrian Gerst * syscall. 
299616d2483SBrian Gerst */ 300616d2483SBrian Gerst movq $0, RAX(%rsp) 301616d2483SBrian Gerst jmp 2b 302bc7b11c0SJiri SlabySYM_CODE_END(ret_from_fork) 303b9f6976bSThomas Gleixner.popsection 304905a36a2SIngo Molnar 3051d3e53e8SAndy Lutomirski.macro DEBUG_ENTRY_ASSERT_IRQS_OFF 3061d3e53e8SAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 307e17f8234SBoris Ostrovsky pushq %rax 308e17f8234SBoris Ostrovsky SAVE_FLAGS(CLBR_RAX) 309e17f8234SBoris Ostrovsky testl $X86_EFLAGS_IF, %eax 3101d3e53e8SAndy Lutomirski jz .Lokay_\@ 3111d3e53e8SAndy Lutomirski ud2 3121d3e53e8SAndy Lutomirski.Lokay_\@: 313e17f8234SBoris Ostrovsky popq %rax 3141d3e53e8SAndy Lutomirski#endif 3151d3e53e8SAndy Lutomirski.endm 3161d3e53e8SAndy Lutomirski 317cfa82a00SThomas Gleixner/** 318cfa82a00SThomas Gleixner * idtentry_body - Macro to emit code calling the C function 319cfa82a00SThomas Gleixner * @cfunc: C function to be called 320cfa82a00SThomas Gleixner * @has_error_code: Hardware pushed error code on stack 321cfa82a00SThomas Gleixner */ 322e2dcb5f1SThomas Gleixner.macro idtentry_body cfunc has_error_code:req 323cfa82a00SThomas Gleixner 324cfa82a00SThomas Gleixner call error_entry 325cfa82a00SThomas Gleixner UNWIND_HINT_REGS 326cfa82a00SThomas Gleixner 327cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer into 1st argument*/ 328cfa82a00SThomas Gleixner 329cfa82a00SThomas Gleixner .if \has_error_code == 1 330cfa82a00SThomas Gleixner movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ 331cfa82a00SThomas Gleixner movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 332cfa82a00SThomas Gleixner .endif 333cfa82a00SThomas Gleixner 334cfa82a00SThomas Gleixner call \cfunc 335cfa82a00SThomas Gleixner 336424c7d0aSThomas Gleixner jmp error_return 337cfa82a00SThomas Gleixner.endm 338cfa82a00SThomas Gleixner 339cfa82a00SThomas Gleixner/** 340cfa82a00SThomas Gleixner * idtentry - Macro to generate entry stubs for simple IDT entries 341cfa82a00SThomas Gleixner * @vector: Vector number 342cfa82a00SThomas Gleixner * @asmsym: ASM symbol for the entry point 343cfa82a00SThomas Gleixner * @cfunc: C function to be called 344cfa82a00SThomas Gleixner * @has_error_code: Hardware pushed error code on stack 345cfa82a00SThomas Gleixner * 346cfa82a00SThomas Gleixner * The macro emits code to set up the kernel context for straight forward 347cfa82a00SThomas Gleixner * and simple IDT entries. No IST stack, no paranoid entry checks. 348cfa82a00SThomas Gleixner */ 349e2dcb5f1SThomas Gleixner.macro idtentry vector asmsym cfunc has_error_code:req 350cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 351cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=\has_error_code*8 352cfa82a00SThomas Gleixner ASM_CLAC 353cfa82a00SThomas Gleixner 354cfa82a00SThomas Gleixner .if \has_error_code == 0 355cfa82a00SThomas Gleixner pushq $-1 /* ORIG_RAX: no syscall to restart */ 356cfa82a00SThomas Gleixner .endif 357cfa82a00SThomas Gleixner 358cfa82a00SThomas Gleixner .if \vector == X86_TRAP_BP 359cfa82a00SThomas Gleixner /* 360cfa82a00SThomas Gleixner * If coming from kernel space, create a 6-word gap to allow the 361cfa82a00SThomas Gleixner * int3 handler to emulate a call instruction. 
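	 *
	 * Concretely (a sketch of what the .rept below achieves): the six
	 * words already on the stack (ORIG_RAX plus the hardware iret frame)
	 * are re-pushed so a copy sits six slots lower, leaving the original
	 * slots as scratch space into which the int3 handler can later store
	 * an emulated return address without clobbering the live frame.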
362cfa82a00SThomas Gleixner */ 363cfa82a00SThomas Gleixner testb $3, CS-ORIG_RAX(%rsp) 364cfa82a00SThomas Gleixner jnz .Lfrom_usermode_no_gap_\@ 365cfa82a00SThomas Gleixner .rept 6 366cfa82a00SThomas Gleixner pushq 5*8(%rsp) 367cfa82a00SThomas Gleixner .endr 368cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=8 369cfa82a00SThomas Gleixner.Lfrom_usermode_no_gap_\@: 370cfa82a00SThomas Gleixner .endif 371cfa82a00SThomas Gleixner 372e2dcb5f1SThomas Gleixner idtentry_body \cfunc \has_error_code 373cfa82a00SThomas Gleixner 374cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 375cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 376cfa82a00SThomas Gleixner.endm 377cfa82a00SThomas Gleixner 378cfa82a00SThomas Gleixner/* 3790bf7c314SThomas Gleixner * Interrupt entry/exit. 3800bf7c314SThomas Gleixner * 3810bf7c314SThomas Gleixner + The interrupt stubs push (vector) onto the stack, which is the error_code 3820bf7c314SThomas Gleixner * position of idtentry exceptions, and jump to one of the two idtentry points 3830bf7c314SThomas Gleixner * (common/spurious). 3840bf7c314SThomas Gleixner * 3850bf7c314SThomas Gleixner * common_interrupt is a hotpath, align it to a cache line 3860bf7c314SThomas Gleixner */ 3870bf7c314SThomas Gleixner.macro idtentry_irq vector cfunc 3880bf7c314SThomas Gleixner .p2align CONFIG_X86_L1_CACHE_SHIFT 3890bf7c314SThomas Gleixner idtentry \vector asm_\cfunc \cfunc has_error_code=1 3900bf7c314SThomas Gleixner.endm 3910bf7c314SThomas Gleixner 3920bf7c314SThomas Gleixner/* 3936368558cSThomas Gleixner * System vectors which invoke their handlers directly and are not 3946368558cSThomas Gleixner * going through the regular common device interrupt handling code. 3956368558cSThomas Gleixner */ 3966368558cSThomas Gleixner.macro idtentry_sysvec vector cfunc 3976368558cSThomas Gleixner idtentry \vector asm_\cfunc \cfunc has_error_code=0 3986368558cSThomas Gleixner.endm 3996368558cSThomas Gleixner 400cfa82a00SThomas Gleixner/** 401cfa82a00SThomas Gleixner * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB 402cfa82a00SThomas Gleixner * @vector: Vector number 403cfa82a00SThomas Gleixner * @asmsym: ASM symbol for the entry point 404cfa82a00SThomas Gleixner * @cfunc: C function to be called 405cfa82a00SThomas Gleixner * 406cfa82a00SThomas Gleixner * The macro emits code to set up the kernel context for #MC and #DB 407cfa82a00SThomas Gleixner * 408cfa82a00SThomas Gleixner * If the entry comes from user space it uses the normal entry path 409cfa82a00SThomas Gleixner * including the return to user space work and preemption checks on 410cfa82a00SThomas Gleixner * exit. 411cfa82a00SThomas Gleixner * 412cfa82a00SThomas Gleixner * If hits in kernel mode then it needs to go through the paranoid 413cfa82a00SThomas Gleixner * entry as the exception can hit any random state. No preemption 414cfa82a00SThomas Gleixner * check on exit to keep the paranoid path simple. 415cfa82a00SThomas Gleixner */ 416cfa82a00SThomas Gleixner.macro idtentry_mce_db vector asmsym cfunc 417cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 418cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS 419cfa82a00SThomas Gleixner ASM_CLAC 420cfa82a00SThomas Gleixner 421cfa82a00SThomas Gleixner pushq $-1 /* ORIG_RAX: no syscall to restart */ 422cfa82a00SThomas Gleixner 423cfa82a00SThomas Gleixner /* 424cfa82a00SThomas Gleixner * If the entry is from userspace, switch stacks and treat it as 425cfa82a00SThomas Gleixner * a normal entry. 
426cfa82a00SThomas Gleixner */ 427cfa82a00SThomas Gleixner testb $3, CS-ORIG_RAX(%rsp) 428cfa82a00SThomas Gleixner jnz .Lfrom_usermode_switch_stack_\@ 429cfa82a00SThomas Gleixner 430*c82965f9SChang S. Bae /* paranoid_entry returns GS information for paranoid_exit in EBX. */ 431cfa82a00SThomas Gleixner call paranoid_entry 432cfa82a00SThomas Gleixner 433cfa82a00SThomas Gleixner UNWIND_HINT_REGS 434cfa82a00SThomas Gleixner 435cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer */ 436cfa82a00SThomas Gleixner 437cfa82a00SThomas Gleixner call \cfunc 438cfa82a00SThomas Gleixner 439cfa82a00SThomas Gleixner jmp paranoid_exit 440cfa82a00SThomas Gleixner 441cfa82a00SThomas Gleixner /* Switch to the regular task stack and use the noist entry point */ 442cfa82a00SThomas Gleixner.Lfrom_usermode_switch_stack_\@: 443e2dcb5f1SThomas Gleixner idtentry_body noist_\cfunc, has_error_code=0 444cfa82a00SThomas Gleixner 445cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 446cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 447cfa82a00SThomas Gleixner.endm 448cfa82a00SThomas Gleixner 449cfa82a00SThomas Gleixner/* 450cfa82a00SThomas Gleixner * Double fault entry. Straight paranoid. No checks from which context 451cfa82a00SThomas Gleixner * this comes because for the espfix induced #DF this would do the wrong 452cfa82a00SThomas Gleixner * thing. 453cfa82a00SThomas Gleixner */ 454cfa82a00SThomas Gleixner.macro idtentry_df vector asmsym cfunc 455cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 456cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=8 457cfa82a00SThomas Gleixner ASM_CLAC 458cfa82a00SThomas Gleixner 459*c82965f9SChang S. Bae /* paranoid_entry returns GS information for paranoid_exit in EBX. */ 460cfa82a00SThomas Gleixner call paranoid_entry 461cfa82a00SThomas Gleixner UNWIND_HINT_REGS 462cfa82a00SThomas Gleixner 463cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer into first argument */ 464cfa82a00SThomas Gleixner movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ 465cfa82a00SThomas Gleixner movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 466cfa82a00SThomas Gleixner call \cfunc 467cfa82a00SThomas Gleixner 468cfa82a00SThomas Gleixner jmp paranoid_exit 469cfa82a00SThomas Gleixner 470cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 471cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 472cfa82a00SThomas Gleixner.endm 473cfa82a00SThomas Gleixner 474905a36a2SIngo Molnar/* 47553aaf262SThomas Gleixner * Include the defines which emit the idt entries which are shared 476f0178fc0SThomas Gleixner * shared between 32 and 64 bit and emit the __irqentry_text_* markers 477f0178fc0SThomas Gleixner * so the stacktrace boundary checks work. 47853aaf262SThomas Gleixner */ 479f0178fc0SThomas Gleixner .align 16 480f0178fc0SThomas Gleixner .globl __irqentry_text_start 481f0178fc0SThomas Gleixner__irqentry_text_start: 482f0178fc0SThomas Gleixner 48353aaf262SThomas Gleixner#include <asm/idtentry.h> 48453aaf262SThomas Gleixner 485f0178fc0SThomas Gleixner .align 16 486f0178fc0SThomas Gleixner .globl __irqentry_text_end 487f0178fc0SThomas Gleixner__irqentry_text_end: 488f0178fc0SThomas Gleixner 489fa5e5c40SThomas GleixnerSYM_CODE_START_LOCAL(common_interrupt_return) 49026ba4e57SJiri SlabySYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) 49126c4ef9cSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 49226c4ef9cSAndy Lutomirski /* Assert that pt_regs indicates user mode. 
*/ 4931e4c4f61SBorislav Petkov testb $3, CS(%rsp) 49426c4ef9cSAndy Lutomirski jnz 1f 49526c4ef9cSAndy Lutomirski ud2 49626c4ef9cSAndy Lutomirski1: 49726c4ef9cSAndy Lutomirski#endif 498502af0d7SDominik Brodowski POP_REGS pop_rdi=0 4993e3b9293SAndy Lutomirski 5003e3b9293SAndy Lutomirski /* 5013e3b9293SAndy Lutomirski * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. 5023e3b9293SAndy Lutomirski * Save old stack pointer and switch to trampoline stack. 5033e3b9293SAndy Lutomirski */ 5043e3b9293SAndy Lutomirski movq %rsp, %rdi 505c482feefSAndy Lutomirski movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 5061fb14363SJosh Poimboeuf UNWIND_HINT_EMPTY 5073e3b9293SAndy Lutomirski 5083e3b9293SAndy Lutomirski /* Copy the IRET frame to the trampoline stack. */ 5093e3b9293SAndy Lutomirski pushq 6*8(%rdi) /* SS */ 5103e3b9293SAndy Lutomirski pushq 5*8(%rdi) /* RSP */ 5113e3b9293SAndy Lutomirski pushq 4*8(%rdi) /* EFLAGS */ 5123e3b9293SAndy Lutomirski pushq 3*8(%rdi) /* CS */ 5133e3b9293SAndy Lutomirski pushq 2*8(%rdi) /* RIP */ 5143e3b9293SAndy Lutomirski 5153e3b9293SAndy Lutomirski /* Push user RDI on the trampoline stack. */ 5163e3b9293SAndy Lutomirski pushq (%rdi) 5173e3b9293SAndy Lutomirski 5183e3b9293SAndy Lutomirski /* 5193e3b9293SAndy Lutomirski * We are on the trampoline stack. All regs except RDI are live. 5203e3b9293SAndy Lutomirski * We can do future final exit work right here. 5213e3b9293SAndy Lutomirski */ 522afaef01cSAlexander Popov STACKLEAK_ERASE_NOCLOBBER 5233e3b9293SAndy Lutomirski 5246fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 5258a09317bSDave Hansen 5263e3b9293SAndy Lutomirski /* Restore RDI. */ 5273e3b9293SAndy Lutomirski popq %rdi 5283e3b9293SAndy Lutomirski SWAPGS 52926c4ef9cSAndy Lutomirski INTERRUPT_RETURN 53026c4ef9cSAndy Lutomirski 531905a36a2SIngo Molnar 53226ba4e57SJiri SlabySYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL) 53326c4ef9cSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 53426c4ef9cSAndy Lutomirski /* Assert that pt_regs indicates kernel mode. */ 5351e4c4f61SBorislav Petkov testb $3, CS(%rsp) 53626c4ef9cSAndy Lutomirski jz 1f 53726c4ef9cSAndy Lutomirski ud2 53826c4ef9cSAndy Lutomirski1: 53926c4ef9cSAndy Lutomirski#endif 540502af0d7SDominik Brodowski POP_REGS 541e872045bSAndy Lutomirski addq $8, %rsp /* skip regs->orig_ax */ 54210bcc80eSMathieu Desnoyers /* 54310bcc80eSMathieu Desnoyers * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization 54410bcc80eSMathieu Desnoyers * when returning from IPI handler. 54510bcc80eSMathieu Desnoyers */ 546905a36a2SIngo Molnar INTERRUPT_RETURN 547905a36a2SIngo Molnar 548cc66936eSJiri SlabySYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL) 5498c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 550905a36a2SIngo Molnar /* 551905a36a2SIngo Molnar * Are we returning to a stack segment from the LDT? Note: in 552905a36a2SIngo Molnar * 64-bit mode SS:RSP on the exception stack is always valid. 553905a36a2SIngo Molnar */ 554905a36a2SIngo Molnar#ifdef CONFIG_X86_ESPFIX64 555905a36a2SIngo Molnar testb $4, (SS-RIP)(%rsp) 556905a36a2SIngo Molnar jnz native_irq_return_ldt 557905a36a2SIngo Molnar#endif 558905a36a2SIngo Molnar 559cc66936eSJiri SlabySYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL) 560905a36a2SIngo Molnar /* 561905a36a2SIngo Molnar * This may fault. Non-paranoid faults on return to userspace are 562905a36a2SIngo Molnar * handled by fixup_bad_iret. These include #SS, #GP, and #NP. 563c29c775aSThomas Gleixner * Double-faults due to espfix64 are handled in exc_double_fault. 
564905a36a2SIngo Molnar * Other faults here are fatal. 565905a36a2SIngo Molnar */ 566905a36a2SIngo Molnar iretq 567905a36a2SIngo Molnar 568905a36a2SIngo Molnar#ifdef CONFIG_X86_ESPFIX64 569905a36a2SIngo Molnarnative_irq_return_ldt: 57085063facSAndy Lutomirski /* 57185063facSAndy Lutomirski * We are running with user GSBASE. All GPRs contain their user 57285063facSAndy Lutomirski * values. We have a percpu ESPFIX stack that is eight slots 57385063facSAndy Lutomirski * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom 57485063facSAndy Lutomirski * of the ESPFIX stack. 57585063facSAndy Lutomirski * 57685063facSAndy Lutomirski * We clobber RAX and RDI in this code. We stash RDI on the 57785063facSAndy Lutomirski * normal stack and RAX on the ESPFIX stack. 57885063facSAndy Lutomirski * 57985063facSAndy Lutomirski * The ESPFIX stack layout we set up looks like this: 58085063facSAndy Lutomirski * 58185063facSAndy Lutomirski * --- top of ESPFIX stack --- 58285063facSAndy Lutomirski * SS 58385063facSAndy Lutomirski * RSP 58485063facSAndy Lutomirski * RFLAGS 58585063facSAndy Lutomirski * CS 58685063facSAndy Lutomirski * RIP <-- RSP points here when we're done 58785063facSAndy Lutomirski * RAX <-- espfix_waddr points here 58885063facSAndy Lutomirski * --- bottom of ESPFIX stack --- 58985063facSAndy Lutomirski */ 59085063facSAndy Lutomirski 59185063facSAndy Lutomirski pushq %rdi /* Stash user RDI */ 5928a09317bSDave Hansen SWAPGS /* to kernel GS */ 5938a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ 5948a09317bSDave Hansen 595905a36a2SIngo Molnar movq PER_CPU_VAR(espfix_waddr), %rdi 59685063facSAndy Lutomirski movq %rax, (0*8)(%rdi) /* user RAX */ 59785063facSAndy Lutomirski movq (1*8)(%rsp), %rax /* user RIP */ 598905a36a2SIngo Molnar movq %rax, (1*8)(%rdi) 59985063facSAndy Lutomirski movq (2*8)(%rsp), %rax /* user CS */ 600905a36a2SIngo Molnar movq %rax, (2*8)(%rdi) 60185063facSAndy Lutomirski movq (3*8)(%rsp), %rax /* user RFLAGS */ 602905a36a2SIngo Molnar movq %rax, (3*8)(%rdi) 60385063facSAndy Lutomirski movq (5*8)(%rsp), %rax /* user SS */ 604905a36a2SIngo Molnar movq %rax, (5*8)(%rdi) 60585063facSAndy Lutomirski movq (4*8)(%rsp), %rax /* user RSP */ 606905a36a2SIngo Molnar movq %rax, (4*8)(%rdi) 60785063facSAndy Lutomirski /* Now RAX == RSP. */ 60885063facSAndy Lutomirski 60985063facSAndy Lutomirski andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ 61085063facSAndy Lutomirski 61185063facSAndy Lutomirski /* 61285063facSAndy Lutomirski * espfix_stack[31:16] == 0. The page tables are set up such that 61385063facSAndy Lutomirski * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of 61485063facSAndy Lutomirski * espfix_waddr for any X. That is, there are 65536 RO aliases of 61585063facSAndy Lutomirski * the same page. Set up RSP so that RSP[31:16] contains the 61685063facSAndy Lutomirski * respective 16 bits of the /userspace/ RSP and RSP nonetheless 61785063facSAndy Lutomirski * still points to an RO alias of the ESPFIX stack. 
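	 *
	 * Worked example with an illustrative value: if the userspace RSP is
	 * 0x00007ffd1234abcd, then after the "andl $0xffff0000, %eax" above
	 * RAX is 0x0000000012340000, and OR-ing in espfix_stack below yields
	 * an aliased RSP whose bits 31:16 are 0x1234, matching the user RSP.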
61885063facSAndy Lutomirski */ 619905a36a2SIngo Molnar orq PER_CPU_VAR(espfix_stack), %rax 6208a09317bSDave Hansen 6216fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 6228a09317bSDave Hansen SWAPGS /* to user GS */ 6238a09317bSDave Hansen popq %rdi /* Restore user RDI */ 6248a09317bSDave Hansen 625905a36a2SIngo Molnar movq %rax, %rsp 6268c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 62785063facSAndy Lutomirski 62885063facSAndy Lutomirski /* 62985063facSAndy Lutomirski * At this point, we cannot write to the stack any more, but we can 63085063facSAndy Lutomirski * still read. 63185063facSAndy Lutomirski */ 63285063facSAndy Lutomirski popq %rax /* Restore user RAX */ 63385063facSAndy Lutomirski 63485063facSAndy Lutomirski /* 63585063facSAndy Lutomirski * RSP now points to an ordinary IRET frame, except that the page 63685063facSAndy Lutomirski * is read-only and RSP[31:16] are preloaded with the userspace 63785063facSAndy Lutomirski * values. We can now IRET back to userspace. 63885063facSAndy Lutomirski */ 639905a36a2SIngo Molnar jmp native_irq_return_iret 640905a36a2SIngo Molnar#endif 641fa5e5c40SThomas GleixnerSYM_CODE_END(common_interrupt_return) 642fa5e5c40SThomas Gleixner_ASM_NOKPROBE(common_interrupt_return) 643905a36a2SIngo Molnar 644905a36a2SIngo Molnar/* 6454d732138SIngo Molnar * Reload gs selector with exception handling 6464d732138SIngo Molnar * edi: new selector 647b9f6976bSThomas Gleixner * 648b9f6976bSThomas Gleixner * Is in entry.text as it shouldn't be instrumented. 6494d732138SIngo Molnar */ 650410367e3SThomas GleixnerSYM_FUNC_START(asm_load_gs_index) 6518c1f7558SJosh Poimboeuf FRAME_BEGIN 652c9317202SThomas Gleixner swapgs 65342c748bbSBorislav Petkov.Lgs_change: 654905a36a2SIngo Molnar movl %edi, %gs 65596e5d28aSBorislav Petkov2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE 656c9317202SThomas Gleixner swapgs 6578c1f7558SJosh Poimboeuf FRAME_END 658905a36a2SIngo Molnar ret 659410367e3SThomas GleixnerSYM_FUNC_END(asm_load_gs_index) 660410367e3SThomas GleixnerEXPORT_SYMBOL(asm_load_gs_index) 661905a36a2SIngo Molnar 66298ededb6SJiri Slaby _ASM_EXTABLE(.Lgs_change, .Lbad_gs) 663905a36a2SIngo Molnar .section .fixup, "ax" 664905a36a2SIngo Molnar /* running with kernelgs */ 665ef77e688SJiri SlabySYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs) 666c9317202SThomas Gleixner swapgs /* switch back to user gs */ 667b038c842SAndy Lutomirski.macro ZAP_GS 668b038c842SAndy Lutomirski /* This can't be a string because the preprocessor needs to see it. */ 669b038c842SAndy Lutomirski movl $__USER_DS, %eax 670b038c842SAndy Lutomirski movl %eax, %gs 671b038c842SAndy Lutomirski.endm 672b038c842SAndy Lutomirski ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG 673905a36a2SIngo Molnar xorl %eax, %eax 674905a36a2SIngo Molnar movl %eax, %gs 675905a36a2SIngo Molnar jmp 2b 676ef77e688SJiri SlabySYM_CODE_END(.Lbad_gs) 677905a36a2SIngo Molnar .previous 678905a36a2SIngo Molnar 679931b9414SThomas Gleixner/* 680931b9414SThomas Gleixner * rdi: New stack pointer points to the top word of the stack 681931b9414SThomas Gleixner * rsi: Function pointer 682931b9414SThomas Gleixner * rdx: Function argument (can be NULL if none) 683931b9414SThomas Gleixner */ 684931b9414SThomas GleixnerSYM_FUNC_START(asm_call_on_stack) 685931b9414SThomas Gleixner /* 686931b9414SThomas Gleixner * Save the frame pointer unconditionally. This allows the ORC 687931b9414SThomas Gleixner * unwinder to handle the stack switch. 
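	 *
	 * A sketch of a call site, with purely hypothetical names, following
	 * the register convention documented above:
	 *
	 *	leaq	hypothetical_stack_top(%rip), %rdi
	 *	leaq	some_handler(%rip), %rsi
	 *	movq	$0, %rdx		# no argument
	 *	call	asm_call_on_stack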
688931b9414SThomas Gleixner */ 689931b9414SThomas Gleixner pushq %rbp 690931b9414SThomas Gleixner mov %rsp, %rbp 691931b9414SThomas Gleixner 692931b9414SThomas Gleixner /* 693931b9414SThomas Gleixner * The unwinder relies on the word at the top of the new stack 694931b9414SThomas Gleixner * page linking back to the previous RSP. 695931b9414SThomas Gleixner */ 696931b9414SThomas Gleixner mov %rsp, (%rdi) 697931b9414SThomas Gleixner mov %rdi, %rsp 698931b9414SThomas Gleixner /* Move the argument to the right place */ 699931b9414SThomas Gleixner mov %rdx, %rdi 700931b9414SThomas Gleixner 701931b9414SThomas Gleixner1: 702931b9414SThomas Gleixner .pushsection .discard.instr_begin 703931b9414SThomas Gleixner .long 1b - . 704931b9414SThomas Gleixner .popsection 705931b9414SThomas Gleixner 706931b9414SThomas Gleixner CALL_NOSPEC rsi 707931b9414SThomas Gleixner 708931b9414SThomas Gleixner2: 709931b9414SThomas Gleixner .pushsection .discard.instr_end 710931b9414SThomas Gleixner .long 2b - . 711931b9414SThomas Gleixner .popsection 712931b9414SThomas Gleixner 713931b9414SThomas Gleixner /* Restore the previous stack pointer from RBP. */ 714931b9414SThomas Gleixner leaveq 715931b9414SThomas Gleixner ret 716931b9414SThomas GleixnerSYM_FUNC_END(asm_call_on_stack) 717931b9414SThomas Gleixner 71828c11b0fSJuergen Gross#ifdef CONFIG_XEN_PV 719905a36a2SIngo Molnar/* 720905a36a2SIngo Molnar * A note on the "critical region" in our callback handler. 721905a36a2SIngo Molnar * We want to avoid stacking callback handlers due to events occurring 722905a36a2SIngo Molnar * during handling of the last event. To do this, we keep events disabled 723905a36a2SIngo Molnar * until we've done all processing. HOWEVER, we must enable events before 724905a36a2SIngo Molnar * popping the stack frame (can't be done atomically) and so it would still 725905a36a2SIngo Molnar * be possible to get enough handler activations to overflow the stack. 726905a36a2SIngo Molnar * Although unlikely, bugs of that kind are hard to track down, so we'd 727905a36a2SIngo Molnar * like to avoid the possibility. 728905a36a2SIngo Molnar * So, on entry to the handler we detect whether we interrupted an 729905a36a2SIngo Molnar * existing activation in its critical region -- if so, we pop the current 730905a36a2SIngo Molnar * activation and restart the handler using the previous one. 7312f6474e4SThomas Gleixner * 7322f6474e4SThomas Gleixner * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs) 733905a36a2SIngo Molnar */ 7342f6474e4SThomas GleixnerSYM_CODE_START_LOCAL(exc_xen_hypervisor_callback) 7354d732138SIngo Molnar 736905a36a2SIngo Molnar/* 737905a36a2SIngo Molnar * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 738905a36a2SIngo Molnar * see the correct pointer to the pt_regs 739905a36a2SIngo Molnar */ 7408c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 7414d732138SIngo Molnar movq %rdi, %rsp /* we don't return, adjust the stack frame */ 7428c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 7431d3e53e8SAndy Lutomirski 7442f6474e4SThomas Gleixner call xen_pv_evtchn_do_upcall 7451d3e53e8SAndy Lutomirski 7462f6474e4SThomas Gleixner jmp error_return 7472f6474e4SThomas GleixnerSYM_CODE_END(exc_xen_hypervisor_callback) 748905a36a2SIngo Molnar 749905a36a2SIngo Molnar/* 750905a36a2SIngo Molnar * Hypervisor uses this for application faults while it executes. 751905a36a2SIngo Molnar * We get here for two reasons: 752905a36a2SIngo Molnar * 1. Fault while reloading DS, ES, FS or GS 753905a36a2SIngo Molnar * 2. 
Fault while executing IRET 754905a36a2SIngo Molnar * Category 1 we do not need to fix up as Xen has already reloaded all segment 755905a36a2SIngo Molnar * registers that could be reloaded and zeroed the others. 756905a36a2SIngo Molnar * Category 2 we fix up by killing the current process. We cannot use the 757905a36a2SIngo Molnar * normal Linux return path in this case because if we use the IRET hypercall 758905a36a2SIngo Molnar * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 759905a36a2SIngo Molnar * We distinguish between categories by comparing each saved segment register 760905a36a2SIngo Molnar * with its current contents: any discrepancy means we in category 1. 761905a36a2SIngo Molnar */ 762bc7b11c0SJiri SlabySYM_CODE_START(xen_failsafe_callback) 7638c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 764905a36a2SIngo Molnar movl %ds, %ecx 765905a36a2SIngo Molnar cmpw %cx, 0x10(%rsp) 766905a36a2SIngo Molnar jne 1f 767905a36a2SIngo Molnar movl %es, %ecx 768905a36a2SIngo Molnar cmpw %cx, 0x18(%rsp) 769905a36a2SIngo Molnar jne 1f 770905a36a2SIngo Molnar movl %fs, %ecx 771905a36a2SIngo Molnar cmpw %cx, 0x20(%rsp) 772905a36a2SIngo Molnar jne 1f 773905a36a2SIngo Molnar movl %gs, %ecx 774905a36a2SIngo Molnar cmpw %cx, 0x28(%rsp) 775905a36a2SIngo Molnar jne 1f 776905a36a2SIngo Molnar /* All segments match their saved values => Category 2 (Bad IRET). */ 777905a36a2SIngo Molnar movq (%rsp), %rcx 778905a36a2SIngo Molnar movq 8(%rsp), %r11 779905a36a2SIngo Molnar addq $0x30, %rsp 780905a36a2SIngo Molnar pushq $0 /* RIP */ 7818c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 782be4c11afSThomas Gleixner jmp asm_exc_general_protection 783905a36a2SIngo Molnar1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 784905a36a2SIngo Molnar movq (%rsp), %rcx 785905a36a2SIngo Molnar movq 8(%rsp), %r11 786905a36a2SIngo Molnar addq $0x30, %rsp 7878c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 788905a36a2SIngo Molnar pushq $-1 /* orig_ax = -1 => not a system call */ 7893f01daecSDominik Brodowski PUSH_AND_CLEAR_REGS 790946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 791e88d9741SThomas Gleixner jmp error_return 792bc7b11c0SJiri SlabySYM_CODE_END(xen_failsafe_callback) 79328c11b0fSJuergen Gross#endif /* CONFIG_XEN_PV */ 794905a36a2SIngo Molnar 795905a36a2SIngo Molnar/* 796*c82965f9SChang S. Bae * Save all registers in pt_regs. Return GSBASE related information 797*c82965f9SChang S. Bae * in EBX depending on the availability of the FSGSBASE instructions: 798*c82965f9SChang S. Bae * 799*c82965f9SChang S. Bae * FSGSBASE R/EBX 800*c82965f9SChang S. Bae * N 0 -> SWAPGS on exit 801*c82965f9SChang S. Bae * 1 -> no SWAPGS on exit 802*c82965f9SChang S. Bae * 803*c82965f9SChang S. Bae * Y GSBASE value at entry, must be restored in paranoid_exit 804905a36a2SIngo Molnar */ 805ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_entry) 8068c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 807905a36a2SIngo Molnar cld 8089e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1 8099e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8 8108a09317bSDave Hansen 81116561f27SDave Hansen /* 81216561f27SDave Hansen * Always stash CR3 in %r14. This value will be restored, 813ae852495SAndy Lutomirski * verbatim, at exit. Needed if paranoid_entry interrupted 814ae852495SAndy Lutomirski * another entry that already switched to the user CR3 value 815ae852495SAndy Lutomirski * but has not yet returned to userspace. 
81616561f27SDave Hansen * 81716561f27SDave Hansen * This is also why CS (stashed in the "iret frame" by the 81816561f27SDave Hansen * hardware at entry) can not be used: this may be a return 819ae852495SAndy Lutomirski * to kernel code, but with a user CR3 value. 82096b23714SChang S. Bae * 82196b23714SChang S. Bae * Switching CR3 does not depend on kernel GSBASE so it can 82296b23714SChang S. Bae * be done before switching to the kernel GSBASE. This is 82396b23714SChang S. Bae * required for FSGSBASE because the kernel GSBASE has to 82496b23714SChang S. Bae * be retrieved from a kernel internal table. 82516561f27SDave Hansen */ 8268a09317bSDave Hansen SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 8278a09317bSDave Hansen 828*c82965f9SChang S. Bae /* 829*c82965f9SChang S. Bae * Handling GSBASE depends on the availability of FSGSBASE. 830*c82965f9SChang S. Bae * 831*c82965f9SChang S. Bae * Without FSGSBASE the kernel enforces that negative GSBASE 832*c82965f9SChang S. Bae * values indicate kernel GSBASE. With FSGSBASE no assumptions 833*c82965f9SChang S. Bae * can be made about the GSBASE value when entering from user 834*c82965f9SChang S. Bae * space. 835*c82965f9SChang S. Bae */ 836*c82965f9SChang S. Bae ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE 837*c82965f9SChang S. Bae 838*c82965f9SChang S. Bae /* 839*c82965f9SChang S. Bae * Read the current GSBASE and store it in %rbx unconditionally, 840*c82965f9SChang S. Bae * retrieve and set the current CPUs kernel GSBASE. The stored value 841*c82965f9SChang S. Bae * has to be restored in paranoid_exit unconditionally. 842*c82965f9SChang S. Bae * 843*c82965f9SChang S. Bae * The MSR write ensures that no subsequent load is based on a 844*c82965f9SChang S. Bae * mispredicted GSBASE. No extra FENCE required. 845*c82965f9SChang S. Bae */ 846*c82965f9SChang S. Bae SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx 847*c82965f9SChang S. Bae ret 848*c82965f9SChang S. Bae 849*c82965f9SChang S. Bae.Lparanoid_entry_checkgs: 85096b23714SChang S. Bae /* EBX = 1 -> kernel GSBASE active, no restore required */ 85196b23714SChang S. Bae movl $1, %ebx 85296b23714SChang S. Bae /* 85396b23714SChang S. Bae * The kernel-enforced convention is a negative GSBASE indicates 85496b23714SChang S. Bae * a kernel value. No SWAPGS needed on entry and exit. 85596b23714SChang S. Bae */ 85696b23714SChang S. Bae movl $MSR_GS_BASE, %ecx 85796b23714SChang S. Bae rdmsr 85896b23714SChang S. Bae testl %edx, %edx 85996b23714SChang S. Bae jns .Lparanoid_entry_swapgs 86096b23714SChang S. Bae ret 86196b23714SChang S. Bae 86296b23714SChang S. Bae.Lparanoid_entry_swapgs: 86396b23714SChang S. Bae SWAPGS 86496b23714SChang S. Bae 86518ec54fdSJosh Poimboeuf /* 86618ec54fdSJosh Poimboeuf * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an 86718ec54fdSJosh Poimboeuf * unconditional CR3 write, even in the PTI case. So do an lfence 86818ec54fdSJosh Poimboeuf * to prevent GS speculation, regardless of whether PTI is enabled. 86918ec54fdSJosh Poimboeuf */ 87018ec54fdSJosh Poimboeuf FENCE_SWAPGS_KERNEL_ENTRY 87118ec54fdSJosh Poimboeuf 87296b23714SChang S. Bae /* EBX = 0 -> SWAPGS required on exit */ 87396b23714SChang S. Bae xorl %ebx, %ebx 8748a09317bSDave Hansen ret 875ef1e0315SJiri SlabySYM_CODE_END(paranoid_entry) 876905a36a2SIngo Molnar 877905a36a2SIngo Molnar/* 878905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. 
This is invoked 879905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came 880905a36a2SIngo Molnar * from kernel space. 881905a36a2SIngo Molnar * 882905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early 883905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would 884*c82965f9SChang S. Bae * be complicated. Fortunately, there's no good reason to try 885*c82965f9SChang S. Bae * to handle preemption here. 8864d732138SIngo Molnar * 887*c82965f9SChang S. Bae * R/EBX contains the GSBASE related information depending on the 888*c82965f9SChang S. Bae * availability of the FSGSBASE instructions: 889*c82965f9SChang S. Bae * 890*c82965f9SChang S. Bae * FSGSBASE R/EBX 891*c82965f9SChang S. Bae * N 0 -> SWAPGS on exit 892*c82965f9SChang S. Bae * 1 -> no SWAPGS on exit 893*c82965f9SChang S. Bae * 894*c82965f9SChang S. Bae * Y User space GSBASE, must be restored unconditionally 895905a36a2SIngo Molnar */ 896ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_exit) 8978c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 898*c82965f9SChang S. Bae /* 899*c82965f9SChang S. Bae * The order of operations is important. RESTORE_CR3 requires 900*c82965f9SChang S. Bae * kernel GSBASE. 901*c82965f9SChang S. Bae * 902*c82965f9SChang S. Bae * NB to anyone to try to optimize this code: this code does 903*c82965f9SChang S. Bae * not execute at all for exceptions from user mode. Those 904*c82965f9SChang S. Bae * exceptions go through error_exit instead. 905*c82965f9SChang S. Bae */ 906*c82965f9SChang S. Bae RESTORE_CR3 scratch_reg=%rax save_reg=%r14 907*c82965f9SChang S. Bae 908*c82965f9SChang S. Bae /* Handle the three GSBASE cases */ 909*c82965f9SChang S. Bae ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE 910*c82965f9SChang S. Bae 911*c82965f9SChang S. Bae /* With FSGSBASE enabled, unconditionally restore GSBASE */ 912*c82965f9SChang S. Bae wrgsbase %rbx 91345c08383SThomas Gleixner jmp restore_regs_and_return_to_kernel 914*c82965f9SChang S. Bae 915*c82965f9SChang S. Bae.Lparanoid_exit_checkgs: 916*c82965f9SChang S. Bae /* On non-FSGSBASE systems, conditionally do SWAPGS */ 917*c82965f9SChang S. Bae testl %ebx, %ebx 918*c82965f9SChang S. Bae jnz restore_regs_and_return_to_kernel 919*c82965f9SChang S. Bae 920*c82965f9SChang S. Bae /* We are returning to a context with user GSBASE */ 921*c82965f9SChang S. Bae SWAPGS_UNSAFE_STACK 922e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel 923ef1e0315SJiri SlabySYM_CODE_END(paranoid_exit) 924905a36a2SIngo Molnar 925905a36a2SIngo Molnar/* 9269e809d15SDominik Brodowski * Save all registers in pt_regs, and switch GS if needed. 927905a36a2SIngo Molnar */ 928ef1e0315SJiri SlabySYM_CODE_START_LOCAL(error_entry) 9299e809d15SDominik Brodowski UNWIND_HINT_FUNC 930905a36a2SIngo Molnar cld 9319e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1 9329e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8 933905a36a2SIngo Molnar testb $3, CS+8(%rsp) 934cb6f64edSAndy Lutomirski jz .Lerror_kernelspace 935539f5113SAndy Lutomirski 936cb6f64edSAndy Lutomirski /* 937cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered 938cb6f64edSAndy Lutomirski * from user mode due to an IRET fault. 939cb6f64edSAndy Lutomirski */ 940905a36a2SIngo Molnar SWAPGS 94118ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 9428a09317bSDave Hansen /* We have user CR3. Change to kernel CR3. 
*/ 9438a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 944539f5113SAndy Lutomirski 945cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs: 9467f2590a1SAndy Lutomirski /* Put us onto the real thread stack. */ 9477f2590a1SAndy Lutomirski popq %r12 /* save return addr in %12 */ 9487f2590a1SAndy Lutomirski movq %rsp, %rdi /* arg0 = pt_regs pointer */ 9497f2590a1SAndy Lutomirski call sync_regs 9507f2590a1SAndy Lutomirski movq %rax, %rsp /* switch stack */ 9517f2590a1SAndy Lutomirski ENCODE_FRAME_POINTER 9527f2590a1SAndy Lutomirski pushq %r12 953f1075053SAndy Lutomirski ret 95402bc7768SAndy Lutomirski 95518ec54fdSJosh Poimboeuf.Lerror_entry_done_lfence: 95618ec54fdSJosh Poimboeuf FENCE_SWAPGS_KERNEL_ENTRY 957cb6f64edSAndy Lutomirski.Lerror_entry_done: 958905a36a2SIngo Molnar ret 959905a36a2SIngo Molnar 960905a36a2SIngo Molnar /* 961905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with 962905a36a2SIngo Molnar * usergs. Handle them here. B stepping K8s sometimes report a 963905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check 964905a36a2SIngo Molnar * for these here too. 965905a36a2SIngo Molnar */ 966cb6f64edSAndy Lutomirski.Lerror_kernelspace: 967905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx 968905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp) 969cb6f64edSAndy Lutomirski je .Lerror_bad_iret 970905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */ 971905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp) 972cb6f64edSAndy Lutomirski je .Lbstep_iret 97342c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp) 97418ec54fdSJosh Poimboeuf jne .Lerror_entry_done_lfence 975539f5113SAndy Lutomirski 976539f5113SAndy Lutomirski /* 97742c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 978539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in 97942c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase. 980539f5113SAndy Lutomirski */ 9812fa5f04fSWanpeng Li SWAPGS 98218ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 9832fa5f04fSWanpeng Li jmp .Lerror_entry_done 984905a36a2SIngo Molnar 985cb6f64edSAndy Lutomirski.Lbstep_iret: 986905a36a2SIngo Molnar /* Fix truncated RIP */ 987905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp) 988905a36a2SIngo Molnar /* fall through */ 989905a36a2SIngo Molnar 990cb6f64edSAndy Lutomirski.Lerror_bad_iret: 991539f5113SAndy Lutomirski /* 9928a09317bSDave Hansen * We came from an IRET to user mode, so we have user 9938a09317bSDave Hansen * gsbase and CR3. Switch to kernel gsbase and CR3: 994539f5113SAndy Lutomirski */ 995905a36a2SIngo Molnar SWAPGS 99618ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 9978a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 998539f5113SAndy Lutomirski 999539f5113SAndy Lutomirski /* 1000539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs 1001b3681dd5SAndy Lutomirski * as if we faulted immediately after IRET. 
1002539f5113SAndy Lutomirski */ 1003905a36a2SIngo Molnar mov %rsp, %rdi 1004905a36a2SIngo Molnar call fixup_bad_iret 1005905a36a2SIngo Molnar mov %rax, %rsp 1006cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs 1007ef1e0315SJiri SlabySYM_CODE_END(error_entry) 1008905a36a2SIngo Molnar 1009424c7d0aSThomas GleixnerSYM_CODE_START_LOCAL(error_return) 1010424c7d0aSThomas Gleixner UNWIND_HINT_REGS 1011424c7d0aSThomas Gleixner DEBUG_ENTRY_ASSERT_IRQS_OFF 1012424c7d0aSThomas Gleixner testb $3, CS(%rsp) 1013424c7d0aSThomas Gleixner jz restore_regs_and_return_to_kernel 1014424c7d0aSThomas Gleixner jmp swapgs_restore_regs_and_return_to_usermode 1015424c7d0aSThomas GleixnerSYM_CODE_END(error_return) 1016424c7d0aSThomas Gleixner 1017929bacecSAndy Lutomirski/* 1018929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all, 1019929bacecSAndy Lutomirski * so we can use real assembly here. 10208a09317bSDave Hansen * 10218a09317bSDave Hansen * Registers: 10228a09317bSDave Hansen * %r14: Used to save/restore the CR3 of the interrupted context 10238a09317bSDave Hansen * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 1024929bacecSAndy Lutomirski */ 10256271fef0SThomas GleixnerSYM_CODE_START(asm_exc_nmi) 10268c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1027929bacecSAndy Lutomirski 1028fc57a7c6SAndy Lutomirski /* 1029905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then 1030905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context. 1031905a36a2SIngo Molnar * This means that we can have nested NMIs where the next 1032905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We 1033905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the 1034905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant 1035905a36a2SIngo Molnar * anyway. 1036905a36a2SIngo Molnar * 1037905a36a2SIngo Molnar * To handle this case we do the following: 1038905a36a2SIngo Molnar * Check the a special location on the stack that contains 1039905a36a2SIngo Molnar * a variable that is set when NMIs are executing. 1040905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it 1041905a36a2SIngo Molnar * is an NMI stack. 1042905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI 1043905a36a2SIngo Molnar * stack then: 1044905a36a2SIngo Molnar * o Set the special variable on the stack 10450b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the 10460b22930eSAndy Lutomirski * stack 10470b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack 1048905a36a2SIngo Molnar * o Continue processing the NMI 1049905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack: 10500b22930eSAndy Lutomirski * o Modify the "iret" location to jump to the repeat_nmi 1051905a36a2SIngo Molnar * o return back to the first NMI 1052905a36a2SIngo Molnar * 1053905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable 1054905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that it is 1055905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was 1056905a36a2SIngo Molnar * a nested NMI that updated the copy interrupt stack frame, a 1057905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second 1058905a36a2SIngo Molnar * NMI. 
10599b6e6a83SAndy Lutomirski * 10609b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace 10619b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode 10629b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like 10639b6e6a83SAndy Lutomirski * other IST entries. 1064905a36a2SIngo Molnar */ 1065905a36a2SIngo Molnar 1066e93c1730SAndy Lutomirski ASM_CLAC 1067e93c1730SAndy Lutomirski 1068905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */ 1069905a36a2SIngo Molnar pushq %rdx 1070905a36a2SIngo Molnar 10719b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp) 10729b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel 1073905a36a2SIngo Molnar 1074905a36a2SIngo Molnar /* 10759b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we 10769b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and 10779b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end 10789b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs 10799b6e6a83SAndy Lutomirski * are off. 108083c133cfSAndy Lutomirski * 108183c133cfSAndy Lutomirski * We also must not push anything to the stack before switching 108283c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable. 10839b6e6a83SAndy Lutomirski */ 10849b6e6a83SAndy Lutomirski 1085929bacecSAndy Lutomirski swapgs 10869b6e6a83SAndy Lutomirski cld 108718ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 10888a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 10899b6e6a83SAndy Lutomirski movq %rsp, %rdx 10909b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 10918c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8 10929b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */ 10939b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */ 10949b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */ 10959b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */ 10969b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */ 10978c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 10989b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */ 109930907fd1SDominik Brodowski PUSH_AND_CLEAR_REGS rdx=(%rdx) 1100946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 11019b6e6a83SAndy Lutomirski 11029b6e6a83SAndy Lutomirski /* 11039b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage 11049b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're 11059b6e6a83SAndy Lutomirski * done with the NMI stack. 11069b6e6a83SAndy Lutomirski */ 11079b6e6a83SAndy Lutomirski 11089b6e6a83SAndy Lutomirski movq %rsp, %rdi 11099b6e6a83SAndy Lutomirski movq $-1, %rsi 11106271fef0SThomas Gleixner call exc_nmi 11119b6e6a83SAndy Lutomirski 11129b6e6a83SAndy Lutomirski /* 11139b6e6a83SAndy Lutomirski * Return back to user mode. We must *not* do the normal exit 1114946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts. 
.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                              |
	 * | original Return RSP                                      |
	 * | original RFLAGS                                          |
	 * | original CS                                              |
	 * | original RIP                                             |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                     |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                 |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame         |
	 * | iret Return RSP  } on each loop iteration; overwritten   |
	 * | iret RFLAGS      } by a nested NMI to force another      |
	 * | iret CS          } iteration if needed.                  |
	 * | iret RIP         }                                       |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;        |
	 * | outermost Return RSP  } will not be changed before       |
	 * | outermost RFLAGS      } NMI processing is done.          |
	 * | outermost CS          } Copied to "iret" frame on each   |
	 * | outermost RIP         } iteration.                       |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                  |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

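/*
 * Editor's illustration (hypothetical struct, lowest address first): the
 * region of the NMI stack drawn above, as seen when RSP points at
 * "outermost RIP".  The magic offsets used later ("NMI executing" at
 * 10*8(%rsp) in repeat_nmi, or at 5*8(%rsp) once RSP sits on the "iret"
 * frame) index into this layout.
 *
 *	struct nmi_stack_area {
 *		unsigned long outermost[5];	// RIP, CS, RFLAGS, RSP, SS
 *		unsigned long iret[5];		// RIP, CS, RFLAGS, RSP, SS
 *		unsigned long nmi_executing;	// the "NMI executing" variable
 *		unsigned long rdx_save;		// temp storage for rdx
 *		unsigned long original[5];	// frame pushed by hardware
 *	};
 */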
	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call exc_nmi() anyway, so we can just
	 * resume the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

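/*
 * Editor's sketch (illustrative C, not built): the three checks above,
 * written as a classifier.  repeat_nmi/end_repeat_nmi are the labels in
 * this file and EXCEPTION_STKSZ is the exception stack size; the other
 * names are invented parameters.
 *
 *	enum nmi_class { FIRST_NMI, NESTED_NMI, NESTED_NMI_OUT };
 *
 *	static enum nmi_class classify(unsigned long rip, unsigned long prev_rsp,
 *				       unsigned long nmi_stack_top,
 *				       bool nmi_executing, bool df_was_set)
 *	{
 *		// Interrupted inside the frame-copy window: the outer NMI
 *		// will call exc_nmi() again anyway, so just return to it.
 *		if (rip >= (unsigned long)repeat_nmi &&
 *		    rip <  (unsigned long)end_repeat_nmi)
 *			return NESTED_NMI_OUT;
 *		if (nmi_executing)
 *			return NESTED_NMI;
 *		// On the NMI stack with DF set: we interrupted the outer NMI
 *		// between clearing "NMI executing" and its IRET.
 *		if (prev_rsp <= nmi_stack_top &&
 *		    prev_rsp >= nmi_stack_top - EXCEPTION_STKSZ &&
 *		    df_was_set)
 *			return NESTED_NMI;
 *		return FIRST_NMI;
 *	}
 */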
nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here.  But NMIs are still enabled and we can take another
	 * nested NMI.  The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call exc_nmi().  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
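/*
 * Editor's note (illustration only): both frame copies in this handler
 * are plain five-word copies done with pushq loops -- first_nmi copied
 * the hardware frame into the "outermost" slot above, and the loop just
 * below refreshes the "iret" slot from it.  In C terms, roughly:
 *
 *	struct hw_frame { unsigned long rip, cs, rflags, rsp, ss; };
 *
 *	*outermost = *original;		// first_nmi, done once
 *	*iret      = *outermost;	// repeat_nmi, done per iteration
 */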

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled.  An NMI should not be setting
	 * NEED_RESCHED or anything that normal interrupts and exceptions
	 * might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	/*
	 * The above invocation of paranoid_entry stored the GSBASE
	 * related information in R/EBX depending on the availability
	 * of FSGSBASE.
	 *
	 * If FSGSBASE is enabled, restore the saved GSBASE value
	 * unconditionally, otherwise take the conditional SWAPGS path.
	 */
	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE

	wrgsbase	%rbx
	jmp	nmi_restore

nmi_no_fsgsbase:
	/* EBX == 0 -> invoke SWAPGS */
	testl	%ebx, %ebx
	jnz	nmi_restore

nmi_swapgs:
	SWAPGS_UNSAFE_STACK

nmi_restore:
	POP_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
	 * frame.
	 */
	addq	$6*8, %rsp

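/*
 * Editor's note (worked offsets, illustration only): right after
 * POP_REGS, RSP points at ORIG_RAX, so relative to that RSP:
 *
 *	 0*8		ORIG_RAX
 *	 1*8 ..  5*8	"outermost" RIP, CS, RFLAGS, RSP, SS
 *	 6*8 .. 10*8	"iret"      RIP, CS, RFLAGS, RSP, SS
 *	11*8		"NMI executing"
 *
 * The addq $6*8 above therefore parks RSP on the "iret" frame, which
 * leaves "NMI executing" at 5*8(%rsp) for the clear that follows.
 */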
	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
SYM_CODE_END(asm_exc_nmi)

#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code.  There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
SYM_CODE_START(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysretl
SYM_CODE_END(ignore_sysret)
#endif

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS

	call	do_exit
SYM_CODE_END(rewind_stack_do_exit)
.popsection
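/*
 * Editor's sketch (rough C, illustration only): rewind_stack_do_exit
 * above abandons whatever is on the current stack and gives do_exit() a
 * sane, bounded frame to run on.  set_rsp() is an invented stand-in for
 * the leaq into %rsp:
 *
 *	char *top = this_cpu_read(cpu_current_top_of_stack);
 *	set_rsp(top - PTREGS_SIZE);	// leave room where pt_regs would live
 *	do_exit(code);			// does not return
 */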