1b2441318SGreg Kroah-Hartman/* SPDX-License-Identifier: GPL-2.0 */ 2905a36a2SIngo Molnar/* 3905a36a2SIngo Molnar * linux/arch/x86_64/entry.S 4905a36a2SIngo Molnar * 5905a36a2SIngo Molnar * Copyright (C) 1991, 1992 Linus Torvalds 6905a36a2SIngo Molnar * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs 7905a36a2SIngo Molnar * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> 84d732138SIngo Molnar * 9905a36a2SIngo Molnar * entry.S contains the system-call and fault low-level handling routines. 10905a36a2SIngo Molnar * 11cb1aaebeSMauro Carvalho Chehab * Some of this is documented in Documentation/x86/entry_64.rst 12905a36a2SIngo Molnar * 13905a36a2SIngo Molnar * A note on terminology: 14905a36a2SIngo Molnar * - iret frame: Architecture defined interrupt frame from SS to RIP 15905a36a2SIngo Molnar * at the top of the kernel process stack. 16905a36a2SIngo Molnar * 17905a36a2SIngo Molnar * Some macro usage: 186dcc5627SJiri Slaby * - SYM_FUNC_START/END:Define functions in the symbol table. 194d732138SIngo Molnar * - idtentry: Define exception entry points. 20905a36a2SIngo Molnar */ 21905a36a2SIngo Molnar#include <linux/linkage.h> 22905a36a2SIngo Molnar#include <asm/segment.h> 23905a36a2SIngo Molnar#include <asm/cache.h> 24905a36a2SIngo Molnar#include <asm/errno.h> 25905a36a2SIngo Molnar#include <asm/asm-offsets.h> 26905a36a2SIngo Molnar#include <asm/msr.h> 27905a36a2SIngo Molnar#include <asm/unistd.h> 28905a36a2SIngo Molnar#include <asm/thread_info.h> 29905a36a2SIngo Molnar#include <asm/hw_irq.h> 30905a36a2SIngo Molnar#include <asm/page_types.h> 31905a36a2SIngo Molnar#include <asm/irqflags.h> 32905a36a2SIngo Molnar#include <asm/paravirt.h> 33905a36a2SIngo Molnar#include <asm/percpu.h> 34905a36a2SIngo Molnar#include <asm/asm.h> 35905a36a2SIngo Molnar#include <asm/smap.h> 36905a36a2SIngo Molnar#include <asm/pgtable_types.h> 37784d5699SAl Viro#include <asm/export.h> 388c1f7558SJosh Poimboeuf#include <asm/frame.h> 39cfa82a00SThomas Gleixner#include <asm/trapnr.h> 402641f08bSDavid Woodhouse#include <asm/nospec-branch.h> 41c82965f9SChang S. Bae#include <asm/fsgsbase.h> 42905a36a2SIngo Molnar#include <linux/err.h> 43905a36a2SIngo Molnar 446fd166aaSPeter Zijlstra#include "calling.h" 456fd166aaSPeter Zijlstra 46905a36a2SIngo Molnar.code64 47905a36a2SIngo Molnar.section .entry.text, "ax" 48905a36a2SIngo Molnar 49905a36a2SIngo Molnar/* 504d732138SIngo Molnar * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. 51905a36a2SIngo Molnar * 52fda57b22SAndy Lutomirski * This is the only entry point used for 64-bit system calls. The 53fda57b22SAndy Lutomirski * hardware interface is reasonably well designed and the register to 54fda57b22SAndy Lutomirski * argument mapping Linux uses fits well with the registers that are 55fda57b22SAndy Lutomirski * available when SYSCALL is used. 56fda57b22SAndy Lutomirski * 57fda57b22SAndy Lutomirski * SYSCALL instructions can be found inlined in libc implementations as 58fda57b22SAndy Lutomirski * well as some other programs and libraries. There are also a handful 59fda57b22SAndy Lutomirski * of SYSCALL instructions in the vDSO used, for example, as a 60fda57b22SAndy Lutomirski * clock_gettimeofday fallback. 61fda57b22SAndy Lutomirski * 624d732138SIngo Molnar * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, 63905a36a2SIngo Molnar * then loads new ss, cs, and rip from previously programmed MSRs. 64905a36a2SIngo Molnar * rflags gets masked by a value from another MSR (so CLD and CLAC 65905a36a2SIngo Molnar * are not needed). SYSCALL does not save anything on the stack 66905a36a2SIngo Molnar * and does not change rsp. 67905a36a2SIngo Molnar * 68905a36a2SIngo Molnar * Registers on entry: 69905a36a2SIngo Molnar * rax system call number 70905a36a2SIngo Molnar * rcx return address 71905a36a2SIngo Molnar * r11 saved rflags (note: r11 is callee-clobbered register in C ABI) 72905a36a2SIngo Molnar * rdi arg0 73905a36a2SIngo Molnar * rsi arg1 74905a36a2SIngo Molnar * rdx arg2 75905a36a2SIngo Molnar * r10 arg3 (needs to be moved to rcx to conform to C ABI) 76905a36a2SIngo Molnar * r8 arg4 77905a36a2SIngo Molnar * r9 arg5 78905a36a2SIngo Molnar * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) 79905a36a2SIngo Molnar * 80905a36a2SIngo Molnar * Only called from user space. 81905a36a2SIngo Molnar * 82905a36a2SIngo Molnar * When user can change pt_regs->foo always force IRET. That is because 83905a36a2SIngo Molnar * it deals with uncanonical addresses better. SYSRET has trouble 84905a36a2SIngo Molnar * with them due to bugs in both AMD and Intel CPUs. 85905a36a2SIngo Molnar */ 86905a36a2SIngo Molnar 87bc7b11c0SJiri SlabySYM_CODE_START(entry_SYSCALL_64) 888c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 898f93402bSPeter Zijlstra ENDBR 90905a36a2SIngo Molnar 918a9949bcSAndy Lutomirski swapgs 92bf904d27SAndy Lutomirski /* tss.sp2 is scratch space. */ 9398f05b51SAndy Lutomirski movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) 94bf904d27SAndy Lutomirski SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp 95905a36a2SIngo Molnar movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 96905a36a2SIngo Molnar 97a13644f3SJoerg RoedelSYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL) 98e8d61bdfSPeter Zijlstra ANNOTATE_NOENDBR 99a13644f3SJoerg Roedel 100905a36a2SIngo Molnar /* Construct struct pt_regs on stack */ 101905a36a2SIngo Molnar pushq $__USER_DS /* pt_regs->ss */ 10298f05b51SAndy Lutomirski pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */ 103905a36a2SIngo Molnar pushq %r11 /* pt_regs->flags */ 104905a36a2SIngo Molnar pushq $__USER_CS /* pt_regs->cs */ 105905a36a2SIngo Molnar pushq %rcx /* pt_regs->ip */ 10626ba4e57SJiri SlabySYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) 107905a36a2SIngo Molnar pushq %rax /* pt_regs->orig_ax */ 10830907fd1SDominik Brodowski 10930907fd1SDominik Brodowski PUSH_AND_CLEAR_REGS rax=$-ENOSYS 110905a36a2SIngo Molnar 1111e423bffSAndy Lutomirski /* IRQs are off. */ 1123e5e7f77SH. Peter Anvin (Intel) movq %rsp, %rdi 11305954948SH. Peter Anvin (Intel) /* Sign extend the lower 32bit as syscall numbers are treated as int */ 11405954948SH. Peter Anvin (Intel) movslq %eax, %rsi 1151e423bffSAndy Lutomirski call do_syscall_64 /* returns with IRQs disabled */ 1161e423bffSAndy Lutomirski 117905a36a2SIngo Molnar /* 118905a36a2SIngo Molnar * Try to use SYSRET instead of IRET if we're returning to 1198a055d7fSAndy Lutomirski * a completely clean 64-bit userspace context. If we're not, 1208a055d7fSAndy Lutomirski * go to the slow exit path. 121afd30525SJuergen Gross * In the Xen PV case we must use iret anyway. 122905a36a2SIngo Molnar */ 123afd30525SJuergen Gross 124afd30525SJuergen Gross ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \ 125afd30525SJuergen Gross X86_FEATURE_XENPV 126afd30525SJuergen Gross 127905a36a2SIngo Molnar movq RCX(%rsp), %rcx 128905a36a2SIngo Molnar movq RIP(%rsp), %r11 1298a055d7fSAndy Lutomirski 1308a055d7fSAndy Lutomirski cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ 1318a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 132905a36a2SIngo Molnar 133905a36a2SIngo Molnar /* 134905a36a2SIngo Molnar * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP 135905a36a2SIngo Molnar * in kernel space. This essentially lets the user take over 136905a36a2SIngo Molnar * the kernel, since userspace controls RSP. 137905a36a2SIngo Molnar * 138905a36a2SIngo Molnar * If width of "canonical tail" ever becomes variable, this will need 139905a36a2SIngo Molnar * to be updated to remain correct on both old and new CPUs. 140361b4b58SKirill A. Shutemov * 141cbe0317bSKirill A. Shutemov * Change top bits to match most significant bit (47th or 56th bit 142cbe0317bSKirill A. Shutemov * depending on paging mode) in the address. 143905a36a2SIngo Molnar */ 14409e61a77SKirill A. Shutemov#ifdef CONFIG_X86_5LEVEL 14539b95522SKirill A. Shutemov ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \ 14639b95522SKirill A. Shutemov "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57 14709e61a77SKirill A. Shutemov#else 148905a36a2SIngo Molnar shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 149905a36a2SIngo Molnar sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 15009e61a77SKirill A. Shutemov#endif 1514d732138SIngo Molnar 152905a36a2SIngo Molnar /* If this changed %rcx, it was not canonical */ 153905a36a2SIngo Molnar cmpq %rcx, %r11 1548a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 155905a36a2SIngo Molnar 156905a36a2SIngo Molnar cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ 1578a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 158905a36a2SIngo Molnar 159905a36a2SIngo Molnar movq R11(%rsp), %r11 160905a36a2SIngo Molnar cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ 1618a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 162905a36a2SIngo Molnar 163905a36a2SIngo Molnar /* 1643e035305SBorislav Petkov * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot 1653e035305SBorislav Petkov * restore RF properly. If the slowpath sets it for whatever reason, we 1663e035305SBorislav Petkov * need to restore it correctly. 1673e035305SBorislav Petkov * 1683e035305SBorislav Petkov * SYSRET can restore TF, but unlike IRET, restoring TF results in a 1693e035305SBorislav Petkov * trap from userspace immediately after SYSRET. This would cause an 1703e035305SBorislav Petkov * infinite loop whenever #DB happens with register state that satisfies 1713e035305SBorislav Petkov * the opportunistic SYSRET conditions. For example, single-stepping 1723e035305SBorislav Petkov * this user code: 173905a36a2SIngo Molnar * 174905a36a2SIngo Molnar * movq $stuck_here, %rcx 175905a36a2SIngo Molnar * pushfq 176905a36a2SIngo Molnar * popq %r11 177905a36a2SIngo Molnar * stuck_here: 178905a36a2SIngo Molnar * 179905a36a2SIngo Molnar * would never get past 'stuck_here'. 180905a36a2SIngo Molnar */ 181905a36a2SIngo Molnar testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 1828a055d7fSAndy Lutomirski jnz swapgs_restore_regs_and_return_to_usermode 183905a36a2SIngo Molnar 184905a36a2SIngo Molnar /* nothing to check for RSP */ 185905a36a2SIngo Molnar 186905a36a2SIngo Molnar cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ 1878a055d7fSAndy Lutomirski jne swapgs_restore_regs_and_return_to_usermode 188905a36a2SIngo Molnar 189905a36a2SIngo Molnar /* 190905a36a2SIngo Molnar * We win! This label is here just for ease of understanding 191905a36a2SIngo Molnar * perf profiles. Nothing jumps here. 192905a36a2SIngo Molnar */ 193905a36a2SIngo Molnarsyscall_return_via_sysret: 194905a36a2SIngo Molnar /* rcx and r11 are already restored (see code above) */ 195502af0d7SDominik Brodowski POP_REGS pop_rdi=0 skip_r11rcx=1 1963e3b9293SAndy Lutomirski 1973e3b9293SAndy Lutomirski /* 1983e3b9293SAndy Lutomirski * Now all regs are restored except RSP and RDI. 1993e3b9293SAndy Lutomirski * Save old stack pointer and switch to trampoline stack. 2003e3b9293SAndy Lutomirski */ 2013e3b9293SAndy Lutomirski movq %rsp, %rdi 202c482feefSAndy Lutomirski movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 2031fb14363SJosh Poimboeuf UNWIND_HINT_EMPTY 2043e3b9293SAndy Lutomirski 2053e3b9293SAndy Lutomirski pushq RSP-RDI(%rdi) /* RSP */ 2063e3b9293SAndy Lutomirski pushq (%rdi) /* RDI */ 2073e3b9293SAndy Lutomirski 2083e3b9293SAndy Lutomirski /* 2093e3b9293SAndy Lutomirski * We are on the trampoline stack. All regs except RDI are live. 2103e3b9293SAndy Lutomirski * We can do future final exit work right here. 2113e3b9293SAndy Lutomirski */ 212afaef01cSAlexander Popov STACKLEAK_ERASE_NOCLOBBER 213afaef01cSAlexander Popov 2146fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 2153e3b9293SAndy Lutomirski 2164fbb3910SAndy Lutomirski popq %rdi 2173e3b9293SAndy Lutomirski popq %rsp 218afd30525SJuergen Gross swapgs 219afd30525SJuergen Gross sysretq 220bc7b11c0SJiri SlabySYM_CODE_END(entry_SYSCALL_64) 221905a36a2SIngo Molnar 222905a36a2SIngo Molnar/* 2230100301bSBrian Gerst * %rdi: prev task 2240100301bSBrian Gerst * %rsi: next task 2250100301bSBrian Gerst */ 226b9f6976bSThomas Gleixner.pushsection .text, "ax" 22796c64806SJosh PoimboeufSYM_FUNC_START(__switch_to_asm) 2280100301bSBrian Gerst /* 2290100301bSBrian Gerst * Save callee-saved registers 2300100301bSBrian Gerst * This must match the order in inactive_task_frame 2310100301bSBrian Gerst */ 2320100301bSBrian Gerst pushq %rbp 2330100301bSBrian Gerst pushq %rbx 2340100301bSBrian Gerst pushq %r12 2350100301bSBrian Gerst pushq %r13 2360100301bSBrian Gerst pushq %r14 2370100301bSBrian Gerst pushq %r15 2380100301bSBrian Gerst 2390100301bSBrian Gerst /* switch stack */ 2400100301bSBrian Gerst movq %rsp, TASK_threadsp(%rdi) 2410100301bSBrian Gerst movq TASK_threadsp(%rsi), %rsp 2420100301bSBrian Gerst 243050e9baaSLinus Torvalds#ifdef CONFIG_STACKPROTECTOR 2440100301bSBrian Gerst movq TASK_stack_canary(%rsi), %rbx 245e6401c13SAndy Lutomirski movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset 2460100301bSBrian Gerst#endif 2470100301bSBrian Gerst 248c995efd5SDavid Woodhouse#ifdef CONFIG_RETPOLINE 249c995efd5SDavid Woodhouse /* 250c995efd5SDavid Woodhouse * When switching from a shallower to a deeper call stack 251c995efd5SDavid Woodhouse * the RSB may either underflow or use entries populated 252c995efd5SDavid Woodhouse * with userspace addresses. On CPUs where those concerns 253c995efd5SDavid Woodhouse * exist, overwrite the RSB with entries which capture 254c995efd5SDavid Woodhouse * speculative execution to prevent attack. 255c995efd5SDavid Woodhouse */ 256d1c99108SDavid Woodhouse FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW 257c995efd5SDavid Woodhouse#endif 258c995efd5SDavid Woodhouse 2590100301bSBrian Gerst /* restore callee-saved registers */ 2600100301bSBrian Gerst popq %r15 2610100301bSBrian Gerst popq %r14 2620100301bSBrian Gerst popq %r13 2630100301bSBrian Gerst popq %r12 2640100301bSBrian Gerst popq %rbx 2650100301bSBrian Gerst popq %rbp 2660100301bSBrian Gerst 2670100301bSBrian Gerst jmp __switch_to 26896c64806SJosh PoimboeufSYM_FUNC_END(__switch_to_asm) 269b9f6976bSThomas Gleixner.popsection 2700100301bSBrian Gerst 2710100301bSBrian Gerst/* 272905a36a2SIngo Molnar * A newly forked process directly context switches into this address. 273905a36a2SIngo Molnar * 2740100301bSBrian Gerst * rax: prev task we switched from 275616d2483SBrian Gerst * rbx: kernel thread func (NULL for user thread) 276616d2483SBrian Gerst * r12: kernel thread arg 277905a36a2SIngo Molnar */ 278b9f6976bSThomas Gleixner.pushsection .text, "ax" 279bc7b11c0SJiri SlabySYM_CODE_START(ret_from_fork) 2808c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 2813e3f0695SPeter Zijlstra ANNOTATE_NOENDBR // copy_thread 2820100301bSBrian Gerst movq %rax, %rdi 2834d732138SIngo Molnar call schedule_tail /* rdi: 'prev' task parameter */ 284905a36a2SIngo Molnar 285616d2483SBrian Gerst testq %rbx, %rbx /* from kernel_thread? */ 286616d2483SBrian Gerst jnz 1f /* kernel threads are uncommon */ 287905a36a2SIngo Molnar 288616d2483SBrian Gerst2: 2898c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 290ebd57499SJosh Poimboeuf movq %rsp, %rdi 291167fd210SThomas Gleixner call syscall_exit_to_user_mode /* returns with IRQs disabled */ 2928a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode 293616d2483SBrian Gerst 294616d2483SBrian Gerst1: 295616d2483SBrian Gerst /* kernel thread */ 296d31a5802SJosh Poimboeuf UNWIND_HINT_EMPTY 297616d2483SBrian Gerst movq %r12, %rdi 29834fdce69SPeter Zijlstra CALL_NOSPEC rbx 299616d2483SBrian Gerst /* 300616d2483SBrian Gerst * A kernel thread is allowed to return here after successfully 301be619f7fSEric W. Biederman * calling kernel_execve(). Exit to userspace to complete the execve() 302616d2483SBrian Gerst * syscall. 303616d2483SBrian Gerst */ 304616d2483SBrian Gerst movq $0, RAX(%rsp) 305616d2483SBrian Gerst jmp 2b 306bc7b11c0SJiri SlabySYM_CODE_END(ret_from_fork) 307b9f6976bSThomas Gleixner.popsection 308905a36a2SIngo Molnar 3091d3e53e8SAndy Lutomirski.macro DEBUG_ENTRY_ASSERT_IRQS_OFF 3101d3e53e8SAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 311e17f8234SBoris Ostrovsky pushq %rax 312fafe5e74SJuergen Gross SAVE_FLAGS 313e17f8234SBoris Ostrovsky testl $X86_EFLAGS_IF, %eax 3141d3e53e8SAndy Lutomirski jz .Lokay_\@ 3151d3e53e8SAndy Lutomirski ud2 3161d3e53e8SAndy Lutomirski.Lokay_\@: 317e17f8234SBoris Ostrovsky popq %rax 3181d3e53e8SAndy Lutomirski#endif 3191d3e53e8SAndy Lutomirski.endm 3201d3e53e8SAndy Lutomirski 321*ee774dacSLai Jiangshan/* Save all registers in pt_regs */ 322*ee774dacSLai JiangshanSYM_CODE_START_LOCAL(push_and_clear_regs) 323*ee774dacSLai Jiangshan UNWIND_HINT_FUNC 324*ee774dacSLai Jiangshan PUSH_AND_CLEAR_REGS save_ret=1 325*ee774dacSLai Jiangshan ENCODE_FRAME_POINTER 8 326*ee774dacSLai Jiangshan RET 327*ee774dacSLai JiangshanSYM_CODE_END(push_and_clear_regs) 328*ee774dacSLai Jiangshan 329cfa82a00SThomas Gleixner/** 330cfa82a00SThomas Gleixner * idtentry_body - Macro to emit code calling the C function 331cfa82a00SThomas Gleixner * @cfunc: C function to be called 332cfa82a00SThomas Gleixner * @has_error_code: Hardware pushed error code on stack 333cfa82a00SThomas Gleixner */ 334e2dcb5f1SThomas Gleixner.macro idtentry_body cfunc has_error_code:req 335cfa82a00SThomas Gleixner 336*ee774dacSLai Jiangshan call push_and_clear_regs 337*ee774dacSLai Jiangshan UNWIND_HINT_REGS 338*ee774dacSLai Jiangshan 339cfa82a00SThomas Gleixner call error_entry 340520a7e80SLai Jiangshan movq %rax, %rsp /* switch to the task stack if from userspace */ 341520a7e80SLai Jiangshan ENCODE_FRAME_POINTER 342cfa82a00SThomas Gleixner UNWIND_HINT_REGS 343cfa82a00SThomas Gleixner 344cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer into 1st argument*/ 345cfa82a00SThomas Gleixner 346cfa82a00SThomas Gleixner .if \has_error_code == 1 347cfa82a00SThomas Gleixner movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ 348cfa82a00SThomas Gleixner movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 349cfa82a00SThomas Gleixner .endif 350cfa82a00SThomas Gleixner 351cfa82a00SThomas Gleixner call \cfunc 352cfa82a00SThomas Gleixner 353d66e9d50SPeter Zijlstra /* For some configurations \cfunc ends up being a noreturn. */ 354d66e9d50SPeter Zijlstra REACHABLE 355d66e9d50SPeter Zijlstra 356424c7d0aSThomas Gleixner jmp error_return 357cfa82a00SThomas Gleixner.endm 358cfa82a00SThomas Gleixner 359cfa82a00SThomas Gleixner/** 360cfa82a00SThomas Gleixner * idtentry - Macro to generate entry stubs for simple IDT entries 361cfa82a00SThomas Gleixner * @vector: Vector number 362cfa82a00SThomas Gleixner * @asmsym: ASM symbol for the entry point 363cfa82a00SThomas Gleixner * @cfunc: C function to be called 364cfa82a00SThomas Gleixner * @has_error_code: Hardware pushed error code on stack 365cfa82a00SThomas Gleixner * 366cfa82a00SThomas Gleixner * The macro emits code to set up the kernel context for straight forward 367cfa82a00SThomas Gleixner * and simple IDT entries. No IST stack, no paranoid entry checks. 368cfa82a00SThomas Gleixner */ 369e2dcb5f1SThomas Gleixner.macro idtentry vector asmsym cfunc has_error_code:req 370cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 371cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=\has_error_code*8 3728f93402bSPeter Zijlstra ENDBR 373cfa82a00SThomas Gleixner ASM_CLAC 374cfa82a00SThomas Gleixner 375cfa82a00SThomas Gleixner .if \has_error_code == 0 376cfa82a00SThomas Gleixner pushq $-1 /* ORIG_RAX: no syscall to restart */ 377cfa82a00SThomas Gleixner .endif 378cfa82a00SThomas Gleixner 379cfa82a00SThomas Gleixner .if \vector == X86_TRAP_BP 380cfa82a00SThomas Gleixner /* 381cfa82a00SThomas Gleixner * If coming from kernel space, create a 6-word gap to allow the 382cfa82a00SThomas Gleixner * int3 handler to emulate a call instruction. 383cfa82a00SThomas Gleixner */ 384cfa82a00SThomas Gleixner testb $3, CS-ORIG_RAX(%rsp) 385cfa82a00SThomas Gleixner jnz .Lfrom_usermode_no_gap_\@ 386cfa82a00SThomas Gleixner .rept 6 387cfa82a00SThomas Gleixner pushq 5*8(%rsp) 388cfa82a00SThomas Gleixner .endr 389cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=8 390cfa82a00SThomas Gleixner.Lfrom_usermode_no_gap_\@: 391cfa82a00SThomas Gleixner .endif 392cfa82a00SThomas Gleixner 393e2dcb5f1SThomas Gleixner idtentry_body \cfunc \has_error_code 394cfa82a00SThomas Gleixner 395cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 396cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 397cfa82a00SThomas Gleixner.endm 398cfa82a00SThomas Gleixner 399cfa82a00SThomas Gleixner/* 4000bf7c314SThomas Gleixner * Interrupt entry/exit. 4010bf7c314SThomas Gleixner * 4020bf7c314SThomas Gleixner + The interrupt stubs push (vector) onto the stack, which is the error_code 4030bf7c314SThomas Gleixner * position of idtentry exceptions, and jump to one of the two idtentry points 4040bf7c314SThomas Gleixner * (common/spurious). 4050bf7c314SThomas Gleixner * 4060bf7c314SThomas Gleixner * common_interrupt is a hotpath, align it to a cache line 4070bf7c314SThomas Gleixner */ 4080bf7c314SThomas Gleixner.macro idtentry_irq vector cfunc 4090bf7c314SThomas Gleixner .p2align CONFIG_X86_L1_CACHE_SHIFT 4100bf7c314SThomas Gleixner idtentry \vector asm_\cfunc \cfunc has_error_code=1 4110bf7c314SThomas Gleixner.endm 4120bf7c314SThomas Gleixner 4130bf7c314SThomas Gleixner/* 4146368558cSThomas Gleixner * System vectors which invoke their handlers directly and are not 4156368558cSThomas Gleixner * going through the regular common device interrupt handling code. 4166368558cSThomas Gleixner */ 4176368558cSThomas Gleixner.macro idtentry_sysvec vector cfunc 4186368558cSThomas Gleixner idtentry \vector asm_\cfunc \cfunc has_error_code=0 4196368558cSThomas Gleixner.endm 4206368558cSThomas Gleixner 421cfa82a00SThomas Gleixner/** 422cfa82a00SThomas Gleixner * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB 423cfa82a00SThomas Gleixner * @vector: Vector number 424cfa82a00SThomas Gleixner * @asmsym: ASM symbol for the entry point 425cfa82a00SThomas Gleixner * @cfunc: C function to be called 426cfa82a00SThomas Gleixner * 427cfa82a00SThomas Gleixner * The macro emits code to set up the kernel context for #MC and #DB 428cfa82a00SThomas Gleixner * 429cfa82a00SThomas Gleixner * If the entry comes from user space it uses the normal entry path 430cfa82a00SThomas Gleixner * including the return to user space work and preemption checks on 431cfa82a00SThomas Gleixner * exit. 432cfa82a00SThomas Gleixner * 433cfa82a00SThomas Gleixner * If hits in kernel mode then it needs to go through the paranoid 434cfa82a00SThomas Gleixner * entry as the exception can hit any random state. No preemption 435cfa82a00SThomas Gleixner * check on exit to keep the paranoid path simple. 436cfa82a00SThomas Gleixner */ 437cfa82a00SThomas Gleixner.macro idtentry_mce_db vector asmsym cfunc 438cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 439cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS 4408f93402bSPeter Zijlstra ENDBR 441cfa82a00SThomas Gleixner ASM_CLAC 442cfa82a00SThomas Gleixner 443cfa82a00SThomas Gleixner pushq $-1 /* ORIG_RAX: no syscall to restart */ 444cfa82a00SThomas Gleixner 445cfa82a00SThomas Gleixner /* 446cfa82a00SThomas Gleixner * If the entry is from userspace, switch stacks and treat it as 447cfa82a00SThomas Gleixner * a normal entry. 448cfa82a00SThomas Gleixner */ 449cfa82a00SThomas Gleixner testb $3, CS-ORIG_RAX(%rsp) 450cfa82a00SThomas Gleixner jnz .Lfrom_usermode_switch_stack_\@ 451cfa82a00SThomas Gleixner 452c82965f9SChang S. Bae /* paranoid_entry returns GS information for paranoid_exit in EBX. */ 453cfa82a00SThomas Gleixner call paranoid_entry 454cfa82a00SThomas Gleixner 455cfa82a00SThomas Gleixner UNWIND_HINT_REGS 456cfa82a00SThomas Gleixner 457cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer */ 458cfa82a00SThomas Gleixner 459cfa82a00SThomas Gleixner call \cfunc 460cfa82a00SThomas Gleixner 461cfa82a00SThomas Gleixner jmp paranoid_exit 462cfa82a00SThomas Gleixner 463cfa82a00SThomas Gleixner /* Switch to the regular task stack and use the noist entry point */ 464cfa82a00SThomas Gleixner.Lfrom_usermode_switch_stack_\@: 465e2dcb5f1SThomas Gleixner idtentry_body noist_\cfunc, has_error_code=0 466cfa82a00SThomas Gleixner 467cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 468cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 469cfa82a00SThomas Gleixner.endm 470cfa82a00SThomas Gleixner 471a13644f3SJoerg Roedel#ifdef CONFIG_AMD_MEM_ENCRYPT 472a13644f3SJoerg Roedel/** 473a13644f3SJoerg Roedel * idtentry_vc - Macro to generate entry stub for #VC 474a13644f3SJoerg Roedel * @vector: Vector number 475a13644f3SJoerg Roedel * @asmsym: ASM symbol for the entry point 476a13644f3SJoerg Roedel * @cfunc: C function to be called 477a13644f3SJoerg Roedel * 478a13644f3SJoerg Roedel * The macro emits code to set up the kernel context for #VC. The #VC handler 479a13644f3SJoerg Roedel * runs on an IST stack and needs to be able to cause nested #VC exceptions. 480a13644f3SJoerg Roedel * 481a13644f3SJoerg Roedel * To make this work the #VC entry code tries its best to pretend it doesn't use 482a13644f3SJoerg Roedel * an IST stack by switching to the task stack if coming from user-space (which 483a13644f3SJoerg Roedel * includes early SYSCALL entry path) or back to the stack in the IRET frame if 484a13644f3SJoerg Roedel * entered from kernel-mode. 485a13644f3SJoerg Roedel * 486a13644f3SJoerg Roedel * If entered from kernel-mode the return stack is validated first, and if it is 487a13644f3SJoerg Roedel * not safe to use (e.g. because it points to the entry stack) the #VC handler 488a13644f3SJoerg Roedel * will switch to a fall-back stack (VC2) and call a special handler function. 489a13644f3SJoerg Roedel * 490a13644f3SJoerg Roedel * The macro is only used for one vector, but it is planned to be extended in 491a13644f3SJoerg Roedel * the future for the #HV exception. 492a13644f3SJoerg Roedel */ 493a13644f3SJoerg Roedel.macro idtentry_vc vector asmsym cfunc 494a13644f3SJoerg RoedelSYM_CODE_START(\asmsym) 495a13644f3SJoerg Roedel UNWIND_HINT_IRET_REGS 4968f93402bSPeter Zijlstra ENDBR 497a13644f3SJoerg Roedel ASM_CLAC 498a13644f3SJoerg Roedel 499a13644f3SJoerg Roedel /* 500a13644f3SJoerg Roedel * If the entry is from userspace, switch stacks and treat it as 501a13644f3SJoerg Roedel * a normal entry. 502a13644f3SJoerg Roedel */ 503a13644f3SJoerg Roedel testb $3, CS-ORIG_RAX(%rsp) 504a13644f3SJoerg Roedel jnz .Lfrom_usermode_switch_stack_\@ 505a13644f3SJoerg Roedel 506a13644f3SJoerg Roedel /* 507a13644f3SJoerg Roedel * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX. 508a13644f3SJoerg Roedel * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS 509a13644f3SJoerg Roedel */ 510a13644f3SJoerg Roedel call paranoid_entry 511a13644f3SJoerg Roedel 512a13644f3SJoerg Roedel UNWIND_HINT_REGS 513a13644f3SJoerg Roedel 514a13644f3SJoerg Roedel /* 515a13644f3SJoerg Roedel * Switch off the IST stack to make it free for nested exceptions. The 516a13644f3SJoerg Roedel * vc_switch_off_ist() function will switch back to the interrupted 517a13644f3SJoerg Roedel * stack if it is safe to do so. If not it switches to the VC fall-back 518a13644f3SJoerg Roedel * stack. 519a13644f3SJoerg Roedel */ 520a13644f3SJoerg Roedel movq %rsp, %rdi /* pt_regs pointer */ 521a13644f3SJoerg Roedel call vc_switch_off_ist 522a13644f3SJoerg Roedel movq %rax, %rsp /* Switch to new stack */ 523a13644f3SJoerg Roedel 524a13644f3SJoerg Roedel UNWIND_HINT_REGS 525a13644f3SJoerg Roedel 526a13644f3SJoerg Roedel /* Update pt_regs */ 527a13644f3SJoerg Roedel movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ 528a13644f3SJoerg Roedel movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 529a13644f3SJoerg Roedel 530a13644f3SJoerg Roedel movq %rsp, %rdi /* pt_regs pointer */ 531a13644f3SJoerg Roedel 532be1a5408SJoerg Roedel call kernel_\cfunc 533a13644f3SJoerg Roedel 534a13644f3SJoerg Roedel /* 535a13644f3SJoerg Roedel * No need to switch back to the IST stack. The current stack is either 536a13644f3SJoerg Roedel * identical to the stack in the IRET frame or the VC fall-back stack, 537163b0991SIngo Molnar * so it is definitely mapped even with PTI enabled. 538a13644f3SJoerg Roedel */ 539a13644f3SJoerg Roedel jmp paranoid_exit 540a13644f3SJoerg Roedel 541a13644f3SJoerg Roedel /* Switch to the regular task stack */ 542a13644f3SJoerg Roedel.Lfrom_usermode_switch_stack_\@: 543be1a5408SJoerg Roedel idtentry_body user_\cfunc, has_error_code=1 544a13644f3SJoerg Roedel 545a13644f3SJoerg Roedel_ASM_NOKPROBE(\asmsym) 546a13644f3SJoerg RoedelSYM_CODE_END(\asmsym) 547a13644f3SJoerg Roedel.endm 548a13644f3SJoerg Roedel#endif 549a13644f3SJoerg Roedel 550cfa82a00SThomas Gleixner/* 551cfa82a00SThomas Gleixner * Double fault entry. Straight paranoid. No checks from which context 552cfa82a00SThomas Gleixner * this comes because for the espfix induced #DF this would do the wrong 553cfa82a00SThomas Gleixner * thing. 554cfa82a00SThomas Gleixner */ 555cfa82a00SThomas Gleixner.macro idtentry_df vector asmsym cfunc 556cfa82a00SThomas GleixnerSYM_CODE_START(\asmsym) 557cfa82a00SThomas Gleixner UNWIND_HINT_IRET_REGS offset=8 5588f93402bSPeter Zijlstra ENDBR 559cfa82a00SThomas Gleixner ASM_CLAC 560cfa82a00SThomas Gleixner 561c82965f9SChang S. Bae /* paranoid_entry returns GS information for paranoid_exit in EBX. */ 562cfa82a00SThomas Gleixner call paranoid_entry 563cfa82a00SThomas Gleixner UNWIND_HINT_REGS 564cfa82a00SThomas Gleixner 565cfa82a00SThomas Gleixner movq %rsp, %rdi /* pt_regs pointer into first argument */ 566cfa82a00SThomas Gleixner movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ 567cfa82a00SThomas Gleixner movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 568cfa82a00SThomas Gleixner call \cfunc 569cfa82a00SThomas Gleixner 5703515899bSPeter Zijlstra /* For some configurations \cfunc ends up being a noreturn. */ 5713515899bSPeter Zijlstra REACHABLE 5723515899bSPeter Zijlstra 573cfa82a00SThomas Gleixner jmp paranoid_exit 574cfa82a00SThomas Gleixner 575cfa82a00SThomas Gleixner_ASM_NOKPROBE(\asmsym) 576cfa82a00SThomas GleixnerSYM_CODE_END(\asmsym) 577cfa82a00SThomas Gleixner.endm 578cfa82a00SThomas Gleixner 579905a36a2SIngo Molnar/* 58053aaf262SThomas Gleixner * Include the defines which emit the idt entries which are shared 581f0178fc0SThomas Gleixner * shared between 32 and 64 bit and emit the __irqentry_text_* markers 582f0178fc0SThomas Gleixner * so the stacktrace boundary checks work. 58353aaf262SThomas Gleixner */ 584f0178fc0SThomas Gleixner .align 16 585f0178fc0SThomas Gleixner .globl __irqentry_text_start 586f0178fc0SThomas Gleixner__irqentry_text_start: 587f0178fc0SThomas Gleixner 58853aaf262SThomas Gleixner#include <asm/idtentry.h> 58953aaf262SThomas Gleixner 590f0178fc0SThomas Gleixner .align 16 591f0178fc0SThomas Gleixner .globl __irqentry_text_end 592f0178fc0SThomas Gleixner__irqentry_text_end: 5933e3f0695SPeter Zijlstra ANNOTATE_NOENDBR 594f0178fc0SThomas Gleixner 595fa5e5c40SThomas GleixnerSYM_CODE_START_LOCAL(common_interrupt_return) 59626ba4e57SJiri SlabySYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) 59726c4ef9cSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 59826c4ef9cSAndy Lutomirski /* Assert that pt_regs indicates user mode. */ 5991e4c4f61SBorislav Petkov testb $3, CS(%rsp) 60026c4ef9cSAndy Lutomirski jnz 1f 60126c4ef9cSAndy Lutomirski ud2 60226c4ef9cSAndy Lutomirski1: 60326c4ef9cSAndy Lutomirski#endif 6045c8f6a2eSLai Jiangshan#ifdef CONFIG_XEN_PV 6055c8f6a2eSLai Jiangshan ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV 6065c8f6a2eSLai Jiangshan#endif 6075c8f6a2eSLai Jiangshan 608502af0d7SDominik Brodowski POP_REGS pop_rdi=0 6093e3b9293SAndy Lutomirski 6103e3b9293SAndy Lutomirski /* 6113e3b9293SAndy Lutomirski * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. 6123e3b9293SAndy Lutomirski * Save old stack pointer and switch to trampoline stack. 6133e3b9293SAndy Lutomirski */ 6143e3b9293SAndy Lutomirski movq %rsp, %rdi 615c482feefSAndy Lutomirski movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 6161fb14363SJosh Poimboeuf UNWIND_HINT_EMPTY 6173e3b9293SAndy Lutomirski 6183e3b9293SAndy Lutomirski /* Copy the IRET frame to the trampoline stack. */ 6193e3b9293SAndy Lutomirski pushq 6*8(%rdi) /* SS */ 6203e3b9293SAndy Lutomirski pushq 5*8(%rdi) /* RSP */ 6213e3b9293SAndy Lutomirski pushq 4*8(%rdi) /* EFLAGS */ 6223e3b9293SAndy Lutomirski pushq 3*8(%rdi) /* CS */ 6233e3b9293SAndy Lutomirski pushq 2*8(%rdi) /* RIP */ 6243e3b9293SAndy Lutomirski 6253e3b9293SAndy Lutomirski /* Push user RDI on the trampoline stack. */ 6263e3b9293SAndy Lutomirski pushq (%rdi) 6273e3b9293SAndy Lutomirski 6283e3b9293SAndy Lutomirski /* 6293e3b9293SAndy Lutomirski * We are on the trampoline stack. All regs except RDI are live. 6303e3b9293SAndy Lutomirski * We can do future final exit work right here. 6313e3b9293SAndy Lutomirski */ 632afaef01cSAlexander Popov STACKLEAK_ERASE_NOCLOBBER 6333e3b9293SAndy Lutomirski 6346fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 6358a09317bSDave Hansen 6363e3b9293SAndy Lutomirski /* Restore RDI. */ 6373e3b9293SAndy Lutomirski popq %rdi 6386cf3e4c0SPeter Zijlstra swapgs 6398b87d8ceSPeter Zijlstra jmp .Lnative_iret 64026c4ef9cSAndy Lutomirski 641905a36a2SIngo Molnar 64226ba4e57SJiri SlabySYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL) 64326c4ef9cSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 64426c4ef9cSAndy Lutomirski /* Assert that pt_regs indicates kernel mode. */ 6451e4c4f61SBorislav Petkov testb $3, CS(%rsp) 64626c4ef9cSAndy Lutomirski jz 1f 64726c4ef9cSAndy Lutomirski ud2 64826c4ef9cSAndy Lutomirski1: 64926c4ef9cSAndy Lutomirski#endif 650502af0d7SDominik Brodowski POP_REGS 651e872045bSAndy Lutomirski addq $8, %rsp /* skip regs->orig_ax */ 65210bcc80eSMathieu Desnoyers /* 65310bcc80eSMathieu Desnoyers * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization 65410bcc80eSMathieu Desnoyers * when returning from IPI handler. 65510bcc80eSMathieu Desnoyers */ 6568b87d8ceSPeter Zijlstra#ifdef CONFIG_XEN_PV 6578b87d8ceSPeter ZijlstraSYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL) 6588b87d8ceSPeter Zijlstra ANNOTATE_NOENDBR 6598b87d8ceSPeter Zijlstra .byte 0xe9 6608b87d8ceSPeter Zijlstra .long .Lnative_iret - (. + 4) 6618b87d8ceSPeter Zijlstra#endif 662905a36a2SIngo Molnar 6638b87d8ceSPeter Zijlstra.Lnative_iret: 6648c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 665905a36a2SIngo Molnar /* 666905a36a2SIngo Molnar * Are we returning to a stack segment from the LDT? Note: in 667905a36a2SIngo Molnar * 64-bit mode SS:RSP on the exception stack is always valid. 668905a36a2SIngo Molnar */ 669905a36a2SIngo Molnar#ifdef CONFIG_X86_ESPFIX64 670905a36a2SIngo Molnar testb $4, (SS-RIP)(%rsp) 671905a36a2SIngo Molnar jnz native_irq_return_ldt 672905a36a2SIngo Molnar#endif 673905a36a2SIngo Molnar 674cc66936eSJiri SlabySYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL) 6753e3f0695SPeter Zijlstra ANNOTATE_NOENDBR // exc_double_fault 676905a36a2SIngo Molnar /* 677905a36a2SIngo Molnar * This may fault. Non-paranoid faults on return to userspace are 678905a36a2SIngo Molnar * handled by fixup_bad_iret. These include #SS, #GP, and #NP. 679c29c775aSThomas Gleixner * Double-faults due to espfix64 are handled in exc_double_fault. 680905a36a2SIngo Molnar * Other faults here are fatal. 681905a36a2SIngo Molnar */ 682905a36a2SIngo Molnar iretq 683905a36a2SIngo Molnar 684905a36a2SIngo Molnar#ifdef CONFIG_X86_ESPFIX64 685905a36a2SIngo Molnarnative_irq_return_ldt: 68685063facSAndy Lutomirski /* 68785063facSAndy Lutomirski * We are running with user GSBASE. All GPRs contain their user 68885063facSAndy Lutomirski * values. We have a percpu ESPFIX stack that is eight slots 68985063facSAndy Lutomirski * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom 69085063facSAndy Lutomirski * of the ESPFIX stack. 69185063facSAndy Lutomirski * 69285063facSAndy Lutomirski * We clobber RAX and RDI in this code. We stash RDI on the 69385063facSAndy Lutomirski * normal stack and RAX on the ESPFIX stack. 69485063facSAndy Lutomirski * 69585063facSAndy Lutomirski * The ESPFIX stack layout we set up looks like this: 69685063facSAndy Lutomirski * 69785063facSAndy Lutomirski * --- top of ESPFIX stack --- 69885063facSAndy Lutomirski * SS 69985063facSAndy Lutomirski * RSP 70085063facSAndy Lutomirski * RFLAGS 70185063facSAndy Lutomirski * CS 70285063facSAndy Lutomirski * RIP <-- RSP points here when we're done 70385063facSAndy Lutomirski * RAX <-- espfix_waddr points here 70485063facSAndy Lutomirski * --- bottom of ESPFIX stack --- 70585063facSAndy Lutomirski */ 70685063facSAndy Lutomirski 70785063facSAndy Lutomirski pushq %rdi /* Stash user RDI */ 70853c9d924SJuergen Gross swapgs /* to kernel GS */ 7098a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ 7108a09317bSDave Hansen 711905a36a2SIngo Molnar movq PER_CPU_VAR(espfix_waddr), %rdi 71285063facSAndy Lutomirski movq %rax, (0*8)(%rdi) /* user RAX */ 71385063facSAndy Lutomirski movq (1*8)(%rsp), %rax /* user RIP */ 714905a36a2SIngo Molnar movq %rax, (1*8)(%rdi) 71585063facSAndy Lutomirski movq (2*8)(%rsp), %rax /* user CS */ 716905a36a2SIngo Molnar movq %rax, (2*8)(%rdi) 71785063facSAndy Lutomirski movq (3*8)(%rsp), %rax /* user RFLAGS */ 718905a36a2SIngo Molnar movq %rax, (3*8)(%rdi) 71985063facSAndy Lutomirski movq (5*8)(%rsp), %rax /* user SS */ 720905a36a2SIngo Molnar movq %rax, (5*8)(%rdi) 72185063facSAndy Lutomirski movq (4*8)(%rsp), %rax /* user RSP */ 722905a36a2SIngo Molnar movq %rax, (4*8)(%rdi) 72385063facSAndy Lutomirski /* Now RAX == RSP. */ 72485063facSAndy Lutomirski 72585063facSAndy Lutomirski andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ 72685063facSAndy Lutomirski 72785063facSAndy Lutomirski /* 72885063facSAndy Lutomirski * espfix_stack[31:16] == 0. The page tables are set up such that 72985063facSAndy Lutomirski * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of 73085063facSAndy Lutomirski * espfix_waddr for any X. That is, there are 65536 RO aliases of 73185063facSAndy Lutomirski * the same page. Set up RSP so that RSP[31:16] contains the 73285063facSAndy Lutomirski * respective 16 bits of the /userspace/ RSP and RSP nonetheless 73385063facSAndy Lutomirski * still points to an RO alias of the ESPFIX stack. 73485063facSAndy Lutomirski */ 735905a36a2SIngo Molnar orq PER_CPU_VAR(espfix_stack), %rax 7368a09317bSDave Hansen 7376fd166aaSPeter Zijlstra SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 73853c9d924SJuergen Gross swapgs /* to user GS */ 7398a09317bSDave Hansen popq %rdi /* Restore user RDI */ 7408a09317bSDave Hansen 741905a36a2SIngo Molnar movq %rax, %rsp 7428c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 74385063facSAndy Lutomirski 74485063facSAndy Lutomirski /* 74585063facSAndy Lutomirski * At this point, we cannot write to the stack any more, but we can 74685063facSAndy Lutomirski * still read. 74785063facSAndy Lutomirski */ 74885063facSAndy Lutomirski popq %rax /* Restore user RAX */ 74985063facSAndy Lutomirski 75085063facSAndy Lutomirski /* 75185063facSAndy Lutomirski * RSP now points to an ordinary IRET frame, except that the page 75285063facSAndy Lutomirski * is read-only and RSP[31:16] are preloaded with the userspace 75385063facSAndy Lutomirski * values. We can now IRET back to userspace. 75485063facSAndy Lutomirski */ 755905a36a2SIngo Molnar jmp native_irq_return_iret 756905a36a2SIngo Molnar#endif 757fa5e5c40SThomas GleixnerSYM_CODE_END(common_interrupt_return) 758fa5e5c40SThomas Gleixner_ASM_NOKPROBE(common_interrupt_return) 759905a36a2SIngo Molnar 760905a36a2SIngo Molnar/* 7614d732138SIngo Molnar * Reload gs selector with exception handling 7624d732138SIngo Molnar * edi: new selector 763b9f6976bSThomas Gleixner * 764b9f6976bSThomas Gleixner * Is in entry.text as it shouldn't be instrumented. 7654d732138SIngo Molnar */ 766410367e3SThomas GleixnerSYM_FUNC_START(asm_load_gs_index) 7678c1f7558SJosh Poimboeuf FRAME_BEGIN 768c9317202SThomas Gleixner swapgs 76942c748bbSBorislav Petkov.Lgs_change: 7703e3f0695SPeter Zijlstra ANNOTATE_NOENDBR // error_entry 771905a36a2SIngo Molnar movl %edi, %gs 77296e5d28aSBorislav Petkov2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE 773c9317202SThomas Gleixner swapgs 7748c1f7558SJosh Poimboeuf FRAME_END 775f94909ceSPeter Zijlstra RET 776905a36a2SIngo Molnar 777905a36a2SIngo Molnar /* running with kernelgs */ 77816e617d0SPeter Zijlstra.Lbad_gs: 779c9317202SThomas Gleixner swapgs /* switch back to user gs */ 780b038c842SAndy Lutomirski.macro ZAP_GS 781b038c842SAndy Lutomirski /* This can't be a string because the preprocessor needs to see it. */ 782b038c842SAndy Lutomirski movl $__USER_DS, %eax 783b038c842SAndy Lutomirski movl %eax, %gs 784b038c842SAndy Lutomirski.endm 785b038c842SAndy Lutomirski ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG 786905a36a2SIngo Molnar xorl %eax, %eax 787905a36a2SIngo Molnar movl %eax, %gs 788905a36a2SIngo Molnar jmp 2b 78916e617d0SPeter Zijlstra 79016e617d0SPeter Zijlstra _ASM_EXTABLE(.Lgs_change, .Lbad_gs) 79116e617d0SPeter Zijlstra 79216e617d0SPeter ZijlstraSYM_FUNC_END(asm_load_gs_index) 79316e617d0SPeter ZijlstraEXPORT_SYMBOL(asm_load_gs_index) 794905a36a2SIngo Molnar 79528c11b0fSJuergen Gross#ifdef CONFIG_XEN_PV 796905a36a2SIngo Molnar/* 797905a36a2SIngo Molnar * A note on the "critical region" in our callback handler. 798905a36a2SIngo Molnar * We want to avoid stacking callback handlers due to events occurring 799905a36a2SIngo Molnar * during handling of the last event. To do this, we keep events disabled 800905a36a2SIngo Molnar * until we've done all processing. HOWEVER, we must enable events before 801905a36a2SIngo Molnar * popping the stack frame (can't be done atomically) and so it would still 802905a36a2SIngo Molnar * be possible to get enough handler activations to overflow the stack. 803905a36a2SIngo Molnar * Although unlikely, bugs of that kind are hard to track down, so we'd 804905a36a2SIngo Molnar * like to avoid the possibility. 805905a36a2SIngo Molnar * So, on entry to the handler we detect whether we interrupted an 806905a36a2SIngo Molnar * existing activation in its critical region -- if so, we pop the current 807905a36a2SIngo Molnar * activation and restart the handler using the previous one. 8082f6474e4SThomas Gleixner * 8092f6474e4SThomas Gleixner * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs) 810905a36a2SIngo Molnar */ 8112f6474e4SThomas GleixnerSYM_CODE_START_LOCAL(exc_xen_hypervisor_callback) 8124d732138SIngo Molnar 813905a36a2SIngo Molnar/* 814905a36a2SIngo Molnar * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 815905a36a2SIngo Molnar * see the correct pointer to the pt_regs 816905a36a2SIngo Molnar */ 8178c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 8184d732138SIngo Molnar movq %rdi, %rsp /* we don't return, adjust the stack frame */ 8198c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 8201d3e53e8SAndy Lutomirski 8212f6474e4SThomas Gleixner call xen_pv_evtchn_do_upcall 8221d3e53e8SAndy Lutomirski 8232f6474e4SThomas Gleixner jmp error_return 8242f6474e4SThomas GleixnerSYM_CODE_END(exc_xen_hypervisor_callback) 825905a36a2SIngo Molnar 826905a36a2SIngo Molnar/* 827905a36a2SIngo Molnar * Hypervisor uses this for application faults while it executes. 828905a36a2SIngo Molnar * We get here for two reasons: 829905a36a2SIngo Molnar * 1. Fault while reloading DS, ES, FS or GS 830905a36a2SIngo Molnar * 2. Fault while executing IRET 831905a36a2SIngo Molnar * Category 1 we do not need to fix up as Xen has already reloaded all segment 832905a36a2SIngo Molnar * registers that could be reloaded and zeroed the others. 833905a36a2SIngo Molnar * Category 2 we fix up by killing the current process. We cannot use the 834905a36a2SIngo Molnar * normal Linux return path in this case because if we use the IRET hypercall 835905a36a2SIngo Molnar * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 836905a36a2SIngo Molnar * We distinguish between categories by comparing each saved segment register 837905a36a2SIngo Molnar * with its current contents: any discrepancy means we in category 1. 838905a36a2SIngo Molnar */ 839bc7b11c0SJiri SlabySYM_CODE_START(xen_failsafe_callback) 8408c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 8415b2fc515SPeter Zijlstra ENDBR 842905a36a2SIngo Molnar movl %ds, %ecx 843905a36a2SIngo Molnar cmpw %cx, 0x10(%rsp) 844905a36a2SIngo Molnar jne 1f 845905a36a2SIngo Molnar movl %es, %ecx 846905a36a2SIngo Molnar cmpw %cx, 0x18(%rsp) 847905a36a2SIngo Molnar jne 1f 848905a36a2SIngo Molnar movl %fs, %ecx 849905a36a2SIngo Molnar cmpw %cx, 0x20(%rsp) 850905a36a2SIngo Molnar jne 1f 851905a36a2SIngo Molnar movl %gs, %ecx 852905a36a2SIngo Molnar cmpw %cx, 0x28(%rsp) 853905a36a2SIngo Molnar jne 1f 854905a36a2SIngo Molnar /* All segments match their saved values => Category 2 (Bad IRET). */ 855905a36a2SIngo Molnar movq (%rsp), %rcx 856905a36a2SIngo Molnar movq 8(%rsp), %r11 857905a36a2SIngo Molnar addq $0x30, %rsp 858905a36a2SIngo Molnar pushq $0 /* RIP */ 8598c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS offset=8 860be4c11afSThomas Gleixner jmp asm_exc_general_protection 861905a36a2SIngo Molnar1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 862905a36a2SIngo Molnar movq (%rsp), %rcx 863905a36a2SIngo Molnar movq 8(%rsp), %r11 864905a36a2SIngo Molnar addq $0x30, %rsp 8658c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 866905a36a2SIngo Molnar pushq $-1 /* orig_ax = -1 => not a system call */ 8673f01daecSDominik Brodowski PUSH_AND_CLEAR_REGS 868946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 869e88d9741SThomas Gleixner jmp error_return 870bc7b11c0SJiri SlabySYM_CODE_END(xen_failsafe_callback) 87128c11b0fSJuergen Gross#endif /* CONFIG_XEN_PV */ 872905a36a2SIngo Molnar 873905a36a2SIngo Molnar/* 874c82965f9SChang S. Bae * Save all registers in pt_regs. Return GSBASE related information 875c82965f9SChang S. Bae * in EBX depending on the availability of the FSGSBASE instructions: 876c82965f9SChang S. Bae * 877c82965f9SChang S. Bae * FSGSBASE R/EBX 878c82965f9SChang S. Bae * N 0 -> SWAPGS on exit 879c82965f9SChang S. Bae * 1 -> no SWAPGS on exit 880c82965f9SChang S. Bae * 881c82965f9SChang S. Bae * Y GSBASE value at entry, must be restored in paranoid_exit 882905a36a2SIngo Molnar */ 883ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_entry) 8848c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 885905a36a2SIngo Molnar cld 8869e809d15SDominik Brodowski PUSH_AND_CLEAR_REGS save_ret=1 8879e809d15SDominik Brodowski ENCODE_FRAME_POINTER 8 8888a09317bSDave Hansen 88916561f27SDave Hansen /* 89016561f27SDave Hansen * Always stash CR3 in %r14. This value will be restored, 891ae852495SAndy Lutomirski * verbatim, at exit. Needed if paranoid_entry interrupted 892ae852495SAndy Lutomirski * another entry that already switched to the user CR3 value 893ae852495SAndy Lutomirski * but has not yet returned to userspace. 89416561f27SDave Hansen * 89516561f27SDave Hansen * This is also why CS (stashed in the "iret frame" by the 89616561f27SDave Hansen * hardware at entry) can not be used: this may be a return 897ae852495SAndy Lutomirski * to kernel code, but with a user CR3 value. 89896b23714SChang S. Bae * 89996b23714SChang S. Bae * Switching CR3 does not depend on kernel GSBASE so it can 90096b23714SChang S. Bae * be done before switching to the kernel GSBASE. This is 90196b23714SChang S. Bae * required for FSGSBASE because the kernel GSBASE has to 90296b23714SChang S. Bae * be retrieved from a kernel internal table. 90316561f27SDave Hansen */ 9048a09317bSDave Hansen SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 9058a09317bSDave Hansen 90618ec54fdSJosh Poimboeuf /* 907c82965f9SChang S. Bae * Handling GSBASE depends on the availability of FSGSBASE. 908c82965f9SChang S. Bae * 909c82965f9SChang S. Bae * Without FSGSBASE the kernel enforces that negative GSBASE 910c82965f9SChang S. Bae * values indicate kernel GSBASE. With FSGSBASE no assumptions 911c82965f9SChang S. Bae * can be made about the GSBASE value when entering from user 912c82965f9SChang S. Bae * space. 913c82965f9SChang S. Bae */ 914c82965f9SChang S. Bae ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE 915c82965f9SChang S. Bae 916c82965f9SChang S. Bae /* 917c82965f9SChang S. Bae * Read the current GSBASE and store it in %rbx unconditionally, 918c82965f9SChang S. Bae * retrieve and set the current CPUs kernel GSBASE. The stored value 919c82965f9SChang S. Bae * has to be restored in paranoid_exit unconditionally. 920c82965f9SChang S. Bae * 9210b2c605fSBorislav Petkov * The unconditional write to GS base below ensures that no subsequent 9220b2c605fSBorislav Petkov * loads based on a mispredicted GS base can happen, therefore no LFENCE 9230b2c605fSBorislav Petkov * is needed here. 924c82965f9SChang S. Bae */ 925c82965f9SChang S. Bae SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx 926f94909ceSPeter Zijlstra RET 927c82965f9SChang S. Bae 928c82965f9SChang S. Bae.Lparanoid_entry_checkgs: 92996b23714SChang S. Bae /* EBX = 1 -> kernel GSBASE active, no restore required */ 93096b23714SChang S. Bae movl $1, %ebx 931c07e4555SLai Jiangshan 93296b23714SChang S. Bae /* 93396b23714SChang S. Bae * The kernel-enforced convention is a negative GSBASE indicates 93496b23714SChang S. Bae * a kernel value. No SWAPGS needed on entry and exit. 93596b23714SChang S. Bae */ 93696b23714SChang S. Bae movl $MSR_GS_BASE, %ecx 93796b23714SChang S. Bae rdmsr 93896b23714SChang S. Bae testl %edx, %edx 939c07e4555SLai Jiangshan js .Lparanoid_kernel_gsbase 94018ec54fdSJosh Poimboeuf 94196b23714SChang S. Bae /* EBX = 0 -> SWAPGS required on exit */ 94296b23714SChang S. Bae xorl %ebx, %ebx 943c07e4555SLai Jiangshan swapgs 944c07e4555SLai Jiangshan.Lparanoid_kernel_gsbase: 945c07e4555SLai Jiangshan 946c07e4555SLai Jiangshan FENCE_SWAPGS_KERNEL_ENTRY 947f94909ceSPeter Zijlstra RET 948ef1e0315SJiri SlabySYM_CODE_END(paranoid_entry) 949905a36a2SIngo Molnar 950905a36a2SIngo Molnar/* 951905a36a2SIngo Molnar * "Paranoid" exit path from exception stack. This is invoked 952905a36a2SIngo Molnar * only on return from non-NMI IST interrupts that came 953905a36a2SIngo Molnar * from kernel space. 954905a36a2SIngo Molnar * 955905a36a2SIngo Molnar * We may be returning to very strange contexts (e.g. very early 956905a36a2SIngo Molnar * in syscall entry), so checking for preemption here would 957c82965f9SChang S. Bae * be complicated. Fortunately, there's no good reason to try 958c82965f9SChang S. Bae * to handle preemption here. 9594d732138SIngo Molnar * 960c82965f9SChang S. Bae * R/EBX contains the GSBASE related information depending on the 961c82965f9SChang S. Bae * availability of the FSGSBASE instructions: 962c82965f9SChang S. Bae * 963c82965f9SChang S. Bae * FSGSBASE R/EBX 964c82965f9SChang S. Bae * N 0 -> SWAPGS on exit 965c82965f9SChang S. Bae * 1 -> no SWAPGS on exit 966c82965f9SChang S. Bae * 967c82965f9SChang S. Bae * Y User space GSBASE, must be restored unconditionally 968905a36a2SIngo Molnar */ 969ef1e0315SJiri SlabySYM_CODE_START_LOCAL(paranoid_exit) 9708c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 971c82965f9SChang S. Bae /* 972c82965f9SChang S. Bae * The order of operations is important. RESTORE_CR3 requires 973c82965f9SChang S. Bae * kernel GSBASE. 974c82965f9SChang S. Bae * 975c82965f9SChang S. Bae * NB to anyone to try to optimize this code: this code does 976c82965f9SChang S. Bae * not execute at all for exceptions from user mode. Those 977c82965f9SChang S. Bae * exceptions go through error_exit instead. 978c82965f9SChang S. Bae */ 979c82965f9SChang S. Bae RESTORE_CR3 scratch_reg=%rax save_reg=%r14 980c82965f9SChang S. Bae 981c82965f9SChang S. Bae /* Handle the three GSBASE cases */ 982c82965f9SChang S. Bae ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE 983c82965f9SChang S. Bae 984c82965f9SChang S. Bae /* With FSGSBASE enabled, unconditionally restore GSBASE */ 985c82965f9SChang S. Bae wrgsbase %rbx 98645c08383SThomas Gleixner jmp restore_regs_and_return_to_kernel 987c82965f9SChang S. Bae 988c82965f9SChang S. Bae.Lparanoid_exit_checkgs: 989c82965f9SChang S. Bae /* On non-FSGSBASE systems, conditionally do SWAPGS */ 990c82965f9SChang S. Bae testl %ebx, %ebx 991c82965f9SChang S. Bae jnz restore_regs_and_return_to_kernel 992c82965f9SChang S. Bae 993c82965f9SChang S. Bae /* We are returning to a context with user GSBASE */ 99453c9d924SJuergen Gross swapgs 995e5317832SAndy Lutomirski jmp restore_regs_and_return_to_kernel 996ef1e0315SJiri SlabySYM_CODE_END(paranoid_exit) 997905a36a2SIngo Molnar 998905a36a2SIngo Molnar/* 999*ee774dacSLai Jiangshan * Switch GS and CR3 if needed. 1000905a36a2SIngo Molnar */ 1001ef1e0315SJiri SlabySYM_CODE_START_LOCAL(error_entry) 10029e809d15SDominik Brodowski UNWIND_HINT_FUNC 1003905a36a2SIngo Molnar cld 1004905a36a2SIngo Molnar testb $3, CS+8(%rsp) 1005cb6f64edSAndy Lutomirski jz .Lerror_kernelspace 1006539f5113SAndy Lutomirski 1007cb6f64edSAndy Lutomirski /* 1008cb6f64edSAndy Lutomirski * We entered from user mode or we're pretending to have entered 1009cb6f64edSAndy Lutomirski * from user mode due to an IRET fault. 1010cb6f64edSAndy Lutomirski */ 1011905a36a2SIngo Molnar SWAPGS 101218ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 10138a09317bSDave Hansen /* We have user CR3. Change to kernel CR3. */ 10148a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1015539f5113SAndy Lutomirski 1016520a7e80SLai Jiangshan leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ 1017cb6f64edSAndy Lutomirski.Lerror_entry_from_usermode_after_swapgs: 10187f2590a1SAndy Lutomirski /* Put us onto the real thread stack. */ 10197f2590a1SAndy Lutomirski call sync_regs 1020f94909ceSPeter Zijlstra RET 102102bc7768SAndy Lutomirski 1022905a36a2SIngo Molnar /* 1023905a36a2SIngo Molnar * There are two places in the kernel that can potentially fault with 1024905a36a2SIngo Molnar * usergs. Handle them here. B stepping K8s sometimes report a 1025905a36a2SIngo Molnar * truncated RIP for IRET exceptions returning to compat mode. Check 1026905a36a2SIngo Molnar * for these here too. 1027905a36a2SIngo Molnar */ 1028cb6f64edSAndy Lutomirski.Lerror_kernelspace: 1029905a36a2SIngo Molnar leaq native_irq_return_iret(%rip), %rcx 1030905a36a2SIngo Molnar cmpq %rcx, RIP+8(%rsp) 1031cb6f64edSAndy Lutomirski je .Lerror_bad_iret 1032905a36a2SIngo Molnar movl %ecx, %eax /* zero extend */ 1033905a36a2SIngo Molnar cmpq %rax, RIP+8(%rsp) 1034cb6f64edSAndy Lutomirski je .Lbstep_iret 103542c748bbSBorislav Petkov cmpq $.Lgs_change, RIP+8(%rsp) 103618ec54fdSJosh Poimboeuf jne .Lerror_entry_done_lfence 1037539f5113SAndy Lutomirski 1038539f5113SAndy Lutomirski /* 103942c748bbSBorislav Petkov * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 1040539f5113SAndy Lutomirski * gsbase and proceed. We'll fix up the exception and land in 104142c748bbSBorislav Petkov * .Lgs_change's error handler with kernel gsbase. 1042539f5113SAndy Lutomirski */ 10432fa5f04fSWanpeng Li SWAPGS 10441367afaaSLai Jiangshan 10451367afaaSLai Jiangshan /* 10461367afaaSLai Jiangshan * Issue an LFENCE to prevent GS speculation, regardless of whether it is a 10471367afaaSLai Jiangshan * kernel or user gsbase. 10481367afaaSLai Jiangshan */ 10491367afaaSLai Jiangshan.Lerror_entry_done_lfence: 10501367afaaSLai Jiangshan FENCE_SWAPGS_KERNEL_ENTRY 1051520a7e80SLai Jiangshan leaq 8(%rsp), %rax /* return pt_regs pointer */ 1052f94909ceSPeter Zijlstra RET 1053905a36a2SIngo Molnar 1054cb6f64edSAndy Lutomirski.Lbstep_iret: 1055905a36a2SIngo Molnar /* Fix truncated RIP */ 1056905a36a2SIngo Molnar movq %rcx, RIP+8(%rsp) 1057905a36a2SIngo Molnar /* fall through */ 1058905a36a2SIngo Molnar 1059cb6f64edSAndy Lutomirski.Lerror_bad_iret: 1060539f5113SAndy Lutomirski /* 10618a09317bSDave Hansen * We came from an IRET to user mode, so we have user 10628a09317bSDave Hansen * gsbase and CR3. Switch to kernel gsbase and CR3: 1063539f5113SAndy Lutomirski */ 1064905a36a2SIngo Molnar SWAPGS 106518ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 10668a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1067539f5113SAndy Lutomirski 1068539f5113SAndy Lutomirski /* 1069539f5113SAndy Lutomirski * Pretend that the exception came from user mode: set up pt_regs 1070b3681dd5SAndy Lutomirski * as if we faulted immediately after IRET. 1071539f5113SAndy Lutomirski */ 1072520a7e80SLai Jiangshan leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ 1073905a36a2SIngo Molnar call fixup_bad_iret 1074520a7e80SLai Jiangshan mov %rax, %rdi 1075cb6f64edSAndy Lutomirski jmp .Lerror_entry_from_usermode_after_swapgs 1076ef1e0315SJiri SlabySYM_CODE_END(error_entry) 1077905a36a2SIngo Molnar 1078424c7d0aSThomas GleixnerSYM_CODE_START_LOCAL(error_return) 1079424c7d0aSThomas Gleixner UNWIND_HINT_REGS 1080424c7d0aSThomas Gleixner DEBUG_ENTRY_ASSERT_IRQS_OFF 1081424c7d0aSThomas Gleixner testb $3, CS(%rsp) 1082424c7d0aSThomas Gleixner jz restore_regs_and_return_to_kernel 1083424c7d0aSThomas Gleixner jmp swapgs_restore_regs_and_return_to_usermode 1084424c7d0aSThomas GleixnerSYM_CODE_END(error_return) 1085424c7d0aSThomas Gleixner 1086929bacecSAndy Lutomirski/* 1087929bacecSAndy Lutomirski * Runs on exception stack. Xen PV does not go through this path at all, 1088929bacecSAndy Lutomirski * so we can use real assembly here. 10898a09317bSDave Hansen * 10908a09317bSDave Hansen * Registers: 10918a09317bSDave Hansen * %r14: Used to save/restore the CR3 of the interrupted context 10928a09317bSDave Hansen * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 1093929bacecSAndy Lutomirski */ 10946271fef0SThomas GleixnerSYM_CODE_START(asm_exc_nmi) 10958c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 10968f93402bSPeter Zijlstra ENDBR 1097929bacecSAndy Lutomirski 1098fc57a7c6SAndy Lutomirski /* 1099905a36a2SIngo Molnar * We allow breakpoints in NMIs. If a breakpoint occurs, then 1100905a36a2SIngo Molnar * the iretq it performs will take us out of NMI context. 1101905a36a2SIngo Molnar * This means that we can have nested NMIs where the next 1102905a36a2SIngo Molnar * NMI is using the top of the stack of the previous NMI. We 1103905a36a2SIngo Molnar * can't let it execute because the nested NMI will corrupt the 1104905a36a2SIngo Molnar * stack of the previous NMI. NMI handlers are not re-entrant 1105905a36a2SIngo Molnar * anyway. 1106905a36a2SIngo Molnar * 1107905a36a2SIngo Molnar * To handle this case we do the following: 1108905a36a2SIngo Molnar * Check the a special location on the stack that contains 1109905a36a2SIngo Molnar * a variable that is set when NMIs are executing. 1110905a36a2SIngo Molnar * The interrupted task's stack is also checked to see if it 1111905a36a2SIngo Molnar * is an NMI stack. 1112905a36a2SIngo Molnar * If the variable is not set and the stack is not the NMI 1113905a36a2SIngo Molnar * stack then: 1114905a36a2SIngo Molnar * o Set the special variable on the stack 11150b22930eSAndy Lutomirski * o Copy the interrupt frame into an "outermost" location on the 11160b22930eSAndy Lutomirski * stack 11170b22930eSAndy Lutomirski * o Copy the interrupt frame into an "iret" location on the stack 1118905a36a2SIngo Molnar * o Continue processing the NMI 1119905a36a2SIngo Molnar * If the variable is set or the previous stack is the NMI stack: 11200b22930eSAndy Lutomirski * o Modify the "iret" location to jump to the repeat_nmi 1121905a36a2SIngo Molnar * o return back to the first NMI 1122905a36a2SIngo Molnar * 1123905a36a2SIngo Molnar * Now on exit of the first NMI, we first clear the stack variable 1124905a36a2SIngo Molnar * The NMI stack will tell any nested NMIs at that point that it is 1125905a36a2SIngo Molnar * nested. Then we pop the stack normally with iret, and if there was 1126905a36a2SIngo Molnar * a nested NMI that updated the copy interrupt stack frame, a 1127905a36a2SIngo Molnar * jump will be made to the repeat_nmi code that will handle the second 1128905a36a2SIngo Molnar * NMI. 11299b6e6a83SAndy Lutomirski * 11309b6e6a83SAndy Lutomirski * However, espfix prevents us from directly returning to userspace 11319b6e6a83SAndy Lutomirski * with a single IRET instruction. Similarly, IRET to user mode 11329b6e6a83SAndy Lutomirski * can fault. We therefore handle NMIs from user space like 11339b6e6a83SAndy Lutomirski * other IST entries. 1134905a36a2SIngo Molnar */ 1135905a36a2SIngo Molnar 1136e93c1730SAndy Lutomirski ASM_CLAC 1137e93c1730SAndy Lutomirski 1138905a36a2SIngo Molnar /* Use %rdx as our temp variable throughout */ 1139905a36a2SIngo Molnar pushq %rdx 1140905a36a2SIngo Molnar 11419b6e6a83SAndy Lutomirski testb $3, CS-RIP+8(%rsp) 11429b6e6a83SAndy Lutomirski jz .Lnmi_from_kernel 1143905a36a2SIngo Molnar 1144905a36a2SIngo Molnar /* 11459b6e6a83SAndy Lutomirski * NMI from user mode. We need to run on the thread stack, but we 11469b6e6a83SAndy Lutomirski * can't go through the normal entry paths: NMIs are masked, and 11479b6e6a83SAndy Lutomirski * we don't want to enable interrupts, because then we'll end 11489b6e6a83SAndy Lutomirski * up in an awkward situation in which IRQs are on but NMIs 11499b6e6a83SAndy Lutomirski * are off. 115083c133cfSAndy Lutomirski * 115183c133cfSAndy Lutomirski * We also must not push anything to the stack before switching 115283c133cfSAndy Lutomirski * stacks lest we corrupt the "NMI executing" variable. 11539b6e6a83SAndy Lutomirski */ 11549b6e6a83SAndy Lutomirski 1155929bacecSAndy Lutomirski swapgs 11569b6e6a83SAndy Lutomirski cld 115718ec54fdSJosh Poimboeuf FENCE_SWAPGS_USER_ENTRY 11588a09317bSDave Hansen SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 11599b6e6a83SAndy Lutomirski movq %rsp, %rdx 11609b6e6a83SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 11618c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS base=%rdx offset=8 11629b6e6a83SAndy Lutomirski pushq 5*8(%rdx) /* pt_regs->ss */ 11639b6e6a83SAndy Lutomirski pushq 4*8(%rdx) /* pt_regs->rsp */ 11649b6e6a83SAndy Lutomirski pushq 3*8(%rdx) /* pt_regs->flags */ 11659b6e6a83SAndy Lutomirski pushq 2*8(%rdx) /* pt_regs->cs */ 11669b6e6a83SAndy Lutomirski pushq 1*8(%rdx) /* pt_regs->rip */ 11678c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 11689b6e6a83SAndy Lutomirski pushq $-1 /* pt_regs->orig_ax */ 116930907fd1SDominik Brodowski PUSH_AND_CLEAR_REGS rdx=(%rdx) 1170946c1911SJosh Poimboeuf ENCODE_FRAME_POINTER 11719b6e6a83SAndy Lutomirski 11729b6e6a83SAndy Lutomirski /* 11739b6e6a83SAndy Lutomirski * At this point we no longer need to worry about stack damage 11749b6e6a83SAndy Lutomirski * due to nesting -- we're on the normal thread stack and we're 11759b6e6a83SAndy Lutomirski * done with the NMI stack. 11769b6e6a83SAndy Lutomirski */ 11779b6e6a83SAndy Lutomirski 11789b6e6a83SAndy Lutomirski movq %rsp, %rdi 11799b6e6a83SAndy Lutomirski movq $-1, %rsi 11806271fef0SThomas Gleixner call exc_nmi 11819b6e6a83SAndy Lutomirski 11829b6e6a83SAndy Lutomirski /* 11839b6e6a83SAndy Lutomirski * Return back to user mode. We must *not* do the normal exit 1184946c1911SJosh Poimboeuf * work, because we don't want to enable interrupts. 11859b6e6a83SAndy Lutomirski */ 11868a055d7fSAndy Lutomirski jmp swapgs_restore_regs_and_return_to_usermode 11879b6e6a83SAndy Lutomirski 11889b6e6a83SAndy Lutomirski.Lnmi_from_kernel: 11899b6e6a83SAndy Lutomirski /* 11900b22930eSAndy Lutomirski * Here's what our stack frame will look like: 11910b22930eSAndy Lutomirski * +---------------------------------------------------------+ 11920b22930eSAndy Lutomirski * | original SS | 11930b22930eSAndy Lutomirski * | original Return RSP | 11940b22930eSAndy Lutomirski * | original RFLAGS | 11950b22930eSAndy Lutomirski * | original CS | 11960b22930eSAndy Lutomirski * | original RIP | 11970b22930eSAndy Lutomirski * +---------------------------------------------------------+ 11980b22930eSAndy Lutomirski * | temp storage for rdx | 11990b22930eSAndy Lutomirski * +---------------------------------------------------------+ 12000b22930eSAndy Lutomirski * | "NMI executing" variable | 12010b22930eSAndy Lutomirski * +---------------------------------------------------------+ 12020b22930eSAndy Lutomirski * | iret SS } Copied from "outermost" frame | 12030b22930eSAndy Lutomirski * | iret Return RSP } on each loop iteration; overwritten | 12040b22930eSAndy Lutomirski * | iret RFLAGS } by a nested NMI to force another | 12050b22930eSAndy Lutomirski * | iret CS } iteration if needed. | 12060b22930eSAndy Lutomirski * | iret RIP } | 12070b22930eSAndy Lutomirski * +---------------------------------------------------------+ 12080b22930eSAndy Lutomirski * | outermost SS } initialized in first_nmi; | 12090b22930eSAndy Lutomirski * | outermost Return RSP } will not be changed before | 12100b22930eSAndy Lutomirski * | outermost RFLAGS } NMI processing is done. | 12110b22930eSAndy Lutomirski * | outermost CS } Copied to "iret" frame on each | 12120b22930eSAndy Lutomirski * | outermost RIP } iteration. | 12130b22930eSAndy Lutomirski * +---------------------------------------------------------+ 12140b22930eSAndy Lutomirski * | pt_regs | 12150b22930eSAndy Lutomirski * +---------------------------------------------------------+ 12160b22930eSAndy Lutomirski * 12170b22930eSAndy Lutomirski * The "original" frame is used by hardware. Before re-enabling 12180b22930eSAndy Lutomirski * NMIs, we need to be done with it, and we need to leave enough 12190b22930eSAndy Lutomirski * space for the asm code here. 12200b22930eSAndy Lutomirski * 12210b22930eSAndy Lutomirski * We return by executing IRET while RSP points to the "iret" frame. 12220b22930eSAndy Lutomirski * That will either return for real or it will loop back into NMI 12230b22930eSAndy Lutomirski * processing. 12240b22930eSAndy Lutomirski * 12250b22930eSAndy Lutomirski * The "outermost" frame is copied to the "iret" frame on each 12260b22930eSAndy Lutomirski * iteration of the loop, so each iteration starts with the "iret" 12270b22930eSAndy Lutomirski * frame pointing to the final return target. 12280b22930eSAndy Lutomirski */ 12290b22930eSAndy Lutomirski 12300b22930eSAndy Lutomirski /* 12310b22930eSAndy Lutomirski * Determine whether we're a nested NMI. 12320b22930eSAndy Lutomirski * 1233a27507caSAndy Lutomirski * If we interrupted kernel code between repeat_nmi and 1234a27507caSAndy Lutomirski * end_repeat_nmi, then we are a nested NMI. We must not 1235a27507caSAndy Lutomirski * modify the "iret" frame because it's being written by 1236a27507caSAndy Lutomirski * the outer NMI. That's okay; the outer NMI handler is 12376271fef0SThomas Gleixner * about to about to call exc_nmi() anyway, so we can just 1238a27507caSAndy Lutomirski * resume the outer NMI. 1239a27507caSAndy Lutomirski */ 1240a27507caSAndy Lutomirski 1241a27507caSAndy Lutomirski movq $repeat_nmi, %rdx 1242a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx 1243a27507caSAndy Lutomirski ja 1f 1244a27507caSAndy Lutomirski movq $end_repeat_nmi, %rdx 1245a27507caSAndy Lutomirski cmpq 8(%rsp), %rdx 1246a27507caSAndy Lutomirski ja nested_nmi_out 1247a27507caSAndy Lutomirski1: 1248a27507caSAndy Lutomirski 1249a27507caSAndy Lutomirski /* 1250a27507caSAndy Lutomirski * Now check "NMI executing". If it's set, then we're nested. 12510b22930eSAndy Lutomirski * This will not detect if we interrupted an outer NMI just 12520b22930eSAndy Lutomirski * before IRET. 1253905a36a2SIngo Molnar */ 1254905a36a2SIngo Molnar cmpl $1, -8(%rsp) 1255905a36a2SIngo Molnar je nested_nmi 1256905a36a2SIngo Molnar 1257905a36a2SIngo Molnar /* 12580b22930eSAndy Lutomirski * Now test if the previous stack was an NMI stack. This covers 12590b22930eSAndy Lutomirski * the case where we interrupt an outer NMI after it clears 1260810bc075SAndy Lutomirski * "NMI executing" but before IRET. We need to be careful, though: 1261810bc075SAndy Lutomirski * there is one case in which RSP could point to the NMI stack 1262810bc075SAndy Lutomirski * despite there being no NMI active: naughty userspace controls 1263810bc075SAndy Lutomirski * RSP at the very beginning of the SYSCALL targets. We can 1264810bc075SAndy Lutomirski * pull a fast one on naughty userspace, though: we program 1265810bc075SAndy Lutomirski * SYSCALL to mask DF, so userspace cannot cause DF to be set 1266810bc075SAndy Lutomirski * if it controls the kernel's RSP. We set DF before we clear 1267810bc075SAndy Lutomirski * "NMI executing". 1268905a36a2SIngo Molnar */ 1269905a36a2SIngo Molnar lea 6*8(%rsp), %rdx 1270905a36a2SIngo Molnar /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1271905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1272905a36a2SIngo Molnar /* If the stack pointer is above the NMI stack, this is a normal NMI */ 1273905a36a2SIngo Molnar ja first_nmi 12744d732138SIngo Molnar 1275905a36a2SIngo Molnar subq $EXCEPTION_STKSZ, %rdx 1276905a36a2SIngo Molnar cmpq %rdx, 4*8(%rsp) 1277905a36a2SIngo Molnar /* If it is below the NMI stack, it is a normal NMI */ 1278905a36a2SIngo Molnar jb first_nmi 1279810bc075SAndy Lutomirski 1280810bc075SAndy Lutomirski /* Ah, it is within the NMI stack. */ 1281810bc075SAndy Lutomirski 1282810bc075SAndy Lutomirski testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) 1283810bc075SAndy Lutomirski jz first_nmi /* RSP was user controlled. */ 1284810bc075SAndy Lutomirski 1285810bc075SAndy Lutomirski /* This is a nested NMI. */ 1286905a36a2SIngo Molnar 1287905a36a2SIngo Molnarnested_nmi: 1288905a36a2SIngo Molnar /* 12890b22930eSAndy Lutomirski * Modify the "iret" frame to point to repeat_nmi, forcing another 12900b22930eSAndy Lutomirski * iteration of NMI handling. 1291905a36a2SIngo Molnar */ 129223a781e9SAndy Lutomirski subq $8, %rsp 1293905a36a2SIngo Molnar leaq -10*8(%rsp), %rdx 1294905a36a2SIngo Molnar pushq $__KERNEL_DS 1295905a36a2SIngo Molnar pushq %rdx 1296905a36a2SIngo Molnar pushfq 1297905a36a2SIngo Molnar pushq $__KERNEL_CS 1298905a36a2SIngo Molnar pushq $repeat_nmi 1299905a36a2SIngo Molnar 1300905a36a2SIngo Molnar /* Put stack back */ 1301905a36a2SIngo Molnar addq $(6*8), %rsp 1302905a36a2SIngo Molnar 1303905a36a2SIngo Molnarnested_nmi_out: 1304905a36a2SIngo Molnar popq %rdx 1305905a36a2SIngo Molnar 13060b22930eSAndy Lutomirski /* We are returning to kernel mode, so this cannot result in a fault. */ 1307929bacecSAndy Lutomirski iretq 1308905a36a2SIngo Molnar 1309905a36a2SIngo Molnarfirst_nmi: 13100b22930eSAndy Lutomirski /* Restore rdx. */ 1311905a36a2SIngo Molnar movq (%rsp), %rdx 1312905a36a2SIngo Molnar 131336f1a77bSAndy Lutomirski /* Make room for "NMI executing". */ 131436f1a77bSAndy Lutomirski pushq $0 1315905a36a2SIngo Molnar 13160b22930eSAndy Lutomirski /* Leave room for the "iret" frame */ 1317905a36a2SIngo Molnar subq $(5*8), %rsp 1318905a36a2SIngo Molnar 13190b22930eSAndy Lutomirski /* Copy the "original" frame to the "outermost" frame */ 1320905a36a2SIngo Molnar .rept 5 1321905a36a2SIngo Molnar pushq 11*8(%rsp) 1322905a36a2SIngo Molnar .endr 13238c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1324905a36a2SIngo Molnar 1325905a36a2SIngo Molnar /* Everything up to here is safe from nested NMIs */ 1326905a36a2SIngo Molnar 1327a97439aaSAndy Lutomirski#ifdef CONFIG_DEBUG_ENTRY 1328a97439aaSAndy Lutomirski /* 1329a97439aaSAndy Lutomirski * For ease of testing, unmask NMIs right away. Disabled by 1330a97439aaSAndy Lutomirski * default because IRET is very expensive. 1331a97439aaSAndy Lutomirski */ 1332a97439aaSAndy Lutomirski pushq $0 /* SS */ 1333a97439aaSAndy Lutomirski pushq %rsp /* RSP (minus 8 because of the previous push) */ 1334a97439aaSAndy Lutomirski addq $8, (%rsp) /* Fix up RSP */ 1335a97439aaSAndy Lutomirski pushfq /* RFLAGS */ 1336a97439aaSAndy Lutomirski pushq $__KERNEL_CS /* CS */ 1337a97439aaSAndy Lutomirski pushq $1f /* RIP */ 1338929bacecSAndy Lutomirski iretq /* continues at repeat_nmi below */ 13398c1f7558SJosh Poimboeuf UNWIND_HINT_IRET_REGS 1340a97439aaSAndy Lutomirski1: 1341a97439aaSAndy Lutomirski#endif 1342a97439aaSAndy Lutomirski 13430b22930eSAndy Lutomirskirepeat_nmi: 13443e3f0695SPeter Zijlstra ANNOTATE_NOENDBR // this code 1345905a36a2SIngo Molnar /* 1346905a36a2SIngo Molnar * If there was a nested NMI, the first NMI's iret will return 1347905a36a2SIngo Molnar * here. But NMIs are still enabled and we can take another 1348905a36a2SIngo Molnar * nested NMI. The nested NMI checks the interrupted RIP to see 1349905a36a2SIngo Molnar * if it is between repeat_nmi and end_repeat_nmi, and if so 1350905a36a2SIngo Molnar * it will just return, as we are about to repeat an NMI anyway. 1351905a36a2SIngo Molnar * This makes it safe to copy to the stack frame that a nested 1352905a36a2SIngo Molnar * NMI will update. 13530b22930eSAndy Lutomirski * 13540b22930eSAndy Lutomirski * RSP is pointing to "outermost RIP". gsbase is unknown, but, if 13550b22930eSAndy Lutomirski * we're repeating an NMI, gsbase has the same value that it had on 13560b22930eSAndy Lutomirski * the first iteration. paranoid_entry will load the kernel 13576271fef0SThomas Gleixner * gsbase if needed before we call exc_nmi(). "NMI executing" 135836f1a77bSAndy Lutomirski * is zero. 1359905a36a2SIngo Molnar */ 136036f1a77bSAndy Lutomirski movq $1, 10*8(%rsp) /* Set "NMI executing". */ 1361905a36a2SIngo Molnar 13620b22930eSAndy Lutomirski /* 13630b22930eSAndy Lutomirski * Copy the "outermost" frame to the "iret" frame. NMIs that nest 13640b22930eSAndy Lutomirski * here must not modify the "iret" frame while we're writing to 13650b22930eSAndy Lutomirski * it or it will end up containing garbage. 13660b22930eSAndy Lutomirski */ 1367905a36a2SIngo Molnar addq $(10*8), %rsp 1368905a36a2SIngo Molnar .rept 5 1369905a36a2SIngo Molnar pushq -6*8(%rsp) 1370905a36a2SIngo Molnar .endr 1371905a36a2SIngo Molnar subq $(5*8), %rsp 1372905a36a2SIngo Molnarend_repeat_nmi: 13733e3f0695SPeter Zijlstra ANNOTATE_NOENDBR // this code 1374905a36a2SIngo Molnar 1375905a36a2SIngo Molnar /* 13760b22930eSAndy Lutomirski * Everything below this point can be preempted by a nested NMI. 13770b22930eSAndy Lutomirski * If this happens, then the inner NMI will change the "iret" 13780b22930eSAndy Lutomirski * frame to point back to repeat_nmi. 1379905a36a2SIngo Molnar */ 1380905a36a2SIngo Molnar pushq $-1 /* ORIG_RAX: no syscall to restart */ 1381905a36a2SIngo Molnar 1382905a36a2SIngo Molnar /* 1383905a36a2SIngo Molnar * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit 1384905a36a2SIngo Molnar * as we should not be calling schedule in NMI context. 1385905a36a2SIngo Molnar * Even with normal interrupts enabled. An NMI should not be 1386905a36a2SIngo Molnar * setting NEED_RESCHED or anything that normal interrupts and 1387905a36a2SIngo Molnar * exceptions might do. 1388905a36a2SIngo Molnar */ 1389905a36a2SIngo Molnar call paranoid_entry 13908c1f7558SJosh Poimboeuf UNWIND_HINT_REGS 1391905a36a2SIngo Molnar 1392905a36a2SIngo Molnar movq %rsp, %rdi 1393905a36a2SIngo Molnar movq $-1, %rsi 13946271fef0SThomas Gleixner call exc_nmi 1395905a36a2SIngo Molnar 139616561f27SDave Hansen /* Always restore stashed CR3 value (see paranoid_entry) */ 139721e94459SPeter Zijlstra RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 13988a09317bSDave Hansen 1399c82965f9SChang S. Bae /* 1400c82965f9SChang S. Bae * The above invocation of paranoid_entry stored the GSBASE 1401c82965f9SChang S. Bae * related information in R/EBX depending on the availability 1402c82965f9SChang S. Bae * of FSGSBASE. 1403c82965f9SChang S. Bae * 1404c82965f9SChang S. Bae * If FSGSBASE is enabled, restore the saved GSBASE value 1405c82965f9SChang S. Bae * unconditionally, otherwise take the conditional SWAPGS path. 1406c82965f9SChang S. Bae */ 1407c82965f9SChang S. Bae ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE 1408c82965f9SChang S. Bae 1409c82965f9SChang S. Bae wrgsbase %rbx 1410c82965f9SChang S. Bae jmp nmi_restore 1411c82965f9SChang S. Bae 1412c82965f9SChang S. Baenmi_no_fsgsbase: 1413c82965f9SChang S. Bae /* EBX == 0 -> invoke SWAPGS */ 1414c82965f9SChang S. Bae testl %ebx, %ebx 1415905a36a2SIngo Molnar jnz nmi_restore 1416c82965f9SChang S. Bae 1417905a36a2SIngo Molnarnmi_swapgs: 141853c9d924SJuergen Gross swapgs 1419c82965f9SChang S. Bae 1420905a36a2SIngo Molnarnmi_restore: 1421502af0d7SDominik Brodowski POP_REGS 14220b22930eSAndy Lutomirski 1423471ee483SAndy Lutomirski /* 1424471ee483SAndy Lutomirski * Skip orig_ax and the "outermost" frame to point RSP at the "iret" 1425471ee483SAndy Lutomirski * at the "iret" frame. 1426471ee483SAndy Lutomirski */ 1427471ee483SAndy Lutomirski addq $6*8, %rsp 1428905a36a2SIngo Molnar 1429810bc075SAndy Lutomirski /* 1430810bc075SAndy Lutomirski * Clear "NMI executing". Set DF first so that we can easily 1431810bc075SAndy Lutomirski * distinguish the remaining code between here and IRET from 1432929bacecSAndy Lutomirski * the SYSCALL entry and exit paths. 1433929bacecSAndy Lutomirski * 1434929bacecSAndy Lutomirski * We arguably should just inspect RIP instead, but I (Andy) wrote 1435929bacecSAndy Lutomirski * this code when I had the misapprehension that Xen PV supported 1436929bacecSAndy Lutomirski * NMIs, and Xen PV would break that approach. 1437810bc075SAndy Lutomirski */ 1438810bc075SAndy Lutomirski std 1439810bc075SAndy Lutomirski movq $0, 5*8(%rsp) /* clear "NMI executing" */ 14400b22930eSAndy Lutomirski 14410b22930eSAndy Lutomirski /* 1442929bacecSAndy Lutomirski * iretq reads the "iret" frame and exits the NMI stack in a 1443929bacecSAndy Lutomirski * single instruction. We are returning to kernel mode, so this 1444929bacecSAndy Lutomirski * cannot result in a fault. Similarly, we don't need to worry 1445929bacecSAndy Lutomirski * about espfix64 on the way back to kernel mode. 14460b22930eSAndy Lutomirski */ 1447929bacecSAndy Lutomirski iretq 14486271fef0SThomas GleixnerSYM_CODE_END(asm_exc_nmi) 1449905a36a2SIngo Molnar 1450dffb3f9dSAndy Lutomirski#ifndef CONFIG_IA32_EMULATION 1451dffb3f9dSAndy Lutomirski/* 1452dffb3f9dSAndy Lutomirski * This handles SYSCALL from 32-bit code. There is no way to program 1453dffb3f9dSAndy Lutomirski * MSRs to fully disable 32-bit SYSCALL. 1454dffb3f9dSAndy Lutomirski */ 1455bc7b11c0SJiri SlabySYM_CODE_START(ignore_sysret) 14568c1f7558SJosh Poimboeuf UNWIND_HINT_EMPTY 14578f93402bSPeter Zijlstra ENDBR 1458905a36a2SIngo Molnar mov $-ENOSYS, %eax 1459b2b1d94cSJan Beulich sysretl 1460bc7b11c0SJiri SlabySYM_CODE_END(ignore_sysret) 1461dffb3f9dSAndy Lutomirski#endif 14622deb4be2SAndy Lutomirski 1463b9f6976bSThomas Gleixner.pushsection .text, "ax" 14640e25498fSEric W. BiedermanSYM_CODE_START(rewind_stack_and_make_dead) 14658c1f7558SJosh Poimboeuf UNWIND_HINT_FUNC 14662deb4be2SAndy Lutomirski /* Prevent any naive code from trying to unwind to our caller. */ 14672deb4be2SAndy Lutomirski xorl %ebp, %ebp 14682deb4be2SAndy Lutomirski 14692deb4be2SAndy Lutomirski movq PER_CPU_VAR(cpu_current_top_of_stack), %rax 14708c1f7558SJosh Poimboeuf leaq -PTREGS_SIZE(%rax), %rsp 1471f977df7bSJann Horn UNWIND_HINT_REGS 14722deb4be2SAndy Lutomirski 14730e25498fSEric W. Biederman call make_task_dead 14740e25498fSEric W. BiedermanSYM_CODE_END(rewind_stack_and_make_dead) 1475b9f6976bSThomas Gleixner.popsection 1476