/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - idtentry:		 Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET.  That is
 * because it deals with uncanonical addresses better.  SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
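/*
 * Illustrative userspace sketch (not part of this file): a minimal,
 * statically linked program that uses the register convention described
 * above to issue two raw system calls.  Syscall numbers 1 (write) and
 * 60 (exit) are the x86-64 Linux ABI values; rcx and r11 are clobbered
 * by the SYSCALL instruction itself, exactly as noted above.
 *
 *	.section .rodata
 *	msg:	.ascii	"hello\n"
 *	.set	msglen, . - msg
 *
 *	.text
 *	.globl	_start
 *	_start:
 *		movl	$1, %eax		# __NR_write
 *		movl	$1, %edi		# arg0: fd = stdout
 *		leaq	msg(%rip), %rsi		# arg1: buffer
 *		movl	$msglen, %edx		# arg2: length
 *		syscall				# clobbers rcx and r11
 *
 *		movl	$60, %eax		# __NR_exit
 *		xorl	%edi, %edi		# arg0: status = 0
 *		syscall
 */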

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	ENDBR

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rsp, %rdi
	/* Sign extend the lower 32bit as syscall numbers are treated as int */
	movslq	%eax, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 * In the Xen PV case we must use iret anyway.
	 */

	ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
		X86_FEATURE_XENPV

	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *           stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	swapgs
	sysretq
SYM_CODE_END(entry_SYSCALL_64)
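/*
 * A worked sketch of the canonicalness check performed above, assuming
 * 4-level paging (48-bit virtual addresses, so the shift count is 16):
 *
 *	canonical RCX     = 0x00007fffffffe000
 *	shl $16           -> 0x7fffffffe0000000
 *	sar $16           -> 0x00007fffffffe000   (unchanged, SYSRET allowed)
 *
 *	non-canonical RCX = 0x0000800000000000
 *	shl $16           -> 0x8000000000000000
 *	sar $16           -> 0xffff800000000000   (changed, fall back to IRET)
 *
 * The shift pair sign-extends bit 47 through the top bits, so any address
 * whose top bits did not already match bit 47 is altered and caught by the
 * cmpq against the saved copy.
 */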

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ENDBR
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
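
/*
 * Illustrative sketch (not literally how this file spells it): a stub for a
 * simple exception such as #DE (divide error, vector 0, no hardware error
 * code) would be emitted by an invocation along the lines of
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 *
 * while a fault that does push an error code, such as #GP, would pass
 * has_error_code=1.  The real invocations are generated from the
 * DECLARE_IDTENTRY*() definitions in <asm/idtentry.h>, included further
 * below.
 */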

/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm

/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ENDBR
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

#ifdef CONFIG_AMD_MEM_ENCRYPT
/**
 * idtentry_vc - Macro to generate entry stub for #VC
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #VC. The #VC handler
 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
 *
 * To make this work the #VC entry code tries its best to pretend it doesn't use
 * an IST stack by switching to the task stack if coming from user-space (which
 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
 * entered from kernel-mode.
 *
 * If entered from kernel-mode the return stack is validated first, and if it is
 * not safe to use (e.g. because it points to the entry stack) the #VC handler
 * will switch to a fall-back stack (VC2) and call a special handler function.
 *
 * The macro is only used for one vector, but it is planned to be extended in
 * the future for the #HV exception.
 */
.macro idtentry_vc vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ENDBR
	ASM_CLAC

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry

	UNWIND_HINT_REGS

	/*
	 * Switch off the IST stack to make it free for nested exceptions. The
	 * vc_switch_off_ist() function will switch back to the interrupted
	 * stack if it is safe to do so. If not it switches to the VC fall-back
	 * stack.
	 */
	movq	%rsp, %rdi		/* pt_regs pointer */
	call	vc_switch_off_ist
	movq	%rax, %rsp		/* Switch to new stack */

	UNWIND_HINT_REGS

	/* Update pt_regs */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	kernel_\cfunc

	/*
	 * No need to switch back to the IST stack. The current stack is either
	 * identical to the stack in the IRET frame or the VC fall-back stack,
	 * so it is definitely mapped even with PTI enabled.
	 */
	jmp	paranoid_exit

	/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body user_\cfunc, has_error_code=1

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
#endif

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
#ifdef CONFIG_XEN_PV
	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
#endif

	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack. All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	swapgs
	jmp	.Lnative_iret


SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
#ifdef CONFIG_XEN_PV
SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
	.byte 0xe9
	.long .Lnative_iret - (. + 4)
#endif

.Lnative_iret:
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	swapgs					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	swapgs					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt_return)
_ASM_NOKPROBE(common_interrupt_return)
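/*
 * A worked sketch of the espfix arithmetic above, using hypothetical
 * values (only the bit layout matters): suppose this CPU's
 * PER_CPU_VAR(espfix_stack) is 0xffffff0000003fc0 (bits 31:16 are zero,
 * as required) and the user RSP was 0x0000000012345678:
 *
 *	andl $0xffff0000, %eax	-> RAX = 0x0000000012340000
 *	orq  espfix_stack, %rax	-> RAX = 0xffffff0012343fc0
 *
 * The low bits still select the read-only alias of the copied IRET
 * frame, while bits 31:16 now carry the corresponding bits of the
 * userspace RSP, which is exactly the part a 16-bit SS IRET would
 * otherwise leak from the kernel stack address.
 */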

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	RET

	/* running with kernelgs */
.Lbad_gs:
	swapgs					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)

SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
SYM_CODE_START(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	ENDBR
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	asm_exc_general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

/*
 * Save all registers in pt_regs. Return GSBASE related information
 * in EBX depending on the availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
 */
SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 *
	 * Switching CR3 does not depend on kernel GSBASE so it can
	 * be done before switching to the kernel GSBASE. This is
	 * required for FSGSBASE because the kernel GSBASE has to
	 * be retrieved from a kernel internal table.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * Handling GSBASE depends on the availability of FSGSBASE.
	 *
	 * Without FSGSBASE the kernel enforces that negative GSBASE
	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
	 * can be made about the GSBASE value when entering from user
	 * space.
	 */
	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE

	/*
	 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPU's kernel GSBASE. The stored value
	 * has to be restored in paranoid_exit unconditionally.
	 *
	 * The unconditional write to GS base below ensures that no subsequent
	 * loads based on a mispredicted GS base can happen, therefore no LFENCE
	 * is needed here.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
	RET

.Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */
	movl	$1, %ebx

	/*
	 * The kernel-enforced convention is a negative GSBASE indicates
	 * a kernel value. No SWAPGS needed on entry and exit.
	 */
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	.Lparanoid_kernel_gsbase

	/* EBX = 0 -> SWAPGS required on exit */
	xorl	%ebx, %ebx
	swapgs
.Lparanoid_kernel_gsbase:

	FENCE_SWAPGS_KERNEL_ENTRY
	RET
SYM_CODE_END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * R/EBX contains the GSBASE related information depending on the
 * availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
 */
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.
	 *
	 * NB to anyone who tries to optimize this code: this code does
	 * not execute at all for exceptions from user mode. Those
	 * exceptions go through error_exit instead.
	 */
	RESTORE_CR3	scratch_reg=%rax save_reg=%r14

	/* Handle the three GSBASE cases */
	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE

	/* With FSGSBASE enabled, unconditionally restore GSBASE */
	wrgsbase	%rbx
	jmp	restore_regs_and_return_to_kernel

.Lparanoid_exit_checkgs:
	/* On non-FSGSBASE systems, conditionally do SWAPGS */
	testl	%ebx, %ebx
	jnz	restore_regs_and_return_to_kernel

	/* We are returning to a context with user GSBASE */
	swapgs
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)

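/*
 * A worked sketch of the MSR sign check in paranoid_entry above, with
 * hypothetical addresses (only the sign convention matters): kernel
 * virtual addresses live in the upper canonical half, so a kernel GSBASE
 * such as 0xffff88813fd00000 has bit 63 set.  After rdmsr of MSR_GS_BASE,
 * %edx holds the upper 32 bits (0xffff8881), its sign bit is set, and the
 * js above skips SWAPGS.  A user GSBASE like 0x00007f1234500000 yields
 * %edx = 0x00007f12, the sign bit is clear, and the path falls through to
 * SWAPGS.
 */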
/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12
	RET

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS

	/*
	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
	 * kernel or user gsbase.
	 */
.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
	RET

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)

SYM_CODE_START_LOCAL(error_return)
	UNWIND_HINT_REGS
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	restore_regs_and_return_to_kernel
	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(error_return)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
SYM_CODE_START(asm_exc_nmi)
	UNWIND_HINT_IRET_REGS
	ENDBR

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *   Check a special location on the stack that contains
	 *   a variable that is set when NMIs are executing.
	 *   The interrupted task's stack is also checked to see if it
	 *   is an NMI stack.
SYM_CODE_START_LOCAL(error_return)
	UNWIND_HINT_REGS
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	restore_regs_and_return_to_kernel
	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(error_return)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
SYM_CODE_START(asm_exc_nmi)
	UNWIND_HINT_IRET_REGS
	ENDBR

	/*
	 * We allow breakpoints in NMIs.  If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI.  We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI.  NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
	 *  If the variable is not set and the stack is not the NMI
	 *  stack then:
	 *    o Set the special variable on the stack
	 *    o Copy the interrupt frame into an "outermost" location on the
	 *      stack
	 *    o Copy the interrupt frame into an "iret" location on the stack
	 *    o Continue processing the NMI
	 *  If the variable is set or the previous stack is the NMI stack:
	 *    o Modify the "iret" location to jump to repeat_nmi
	 *    o Return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that they
	 * are nested.  Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction.  Similarly, IRET to user mode
	 * can fault.  We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode.  We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */
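	/*
	 * The sequence below switches to kernel GS and CR3, saves the old
	 * RSP in %rdx and switches to the thread stack, then rebuilds the
	 * hardware iret frame there: 1*8(%rdx)..5*8(%rdx) are the
	 * hardware-saved RIP, CS, RFLAGS, RSP and SS, and
	 * PUSH_AND_CLEAR_REGS rdx=(%rdx) recovers the real %rdx value from
	 * its save slot.
	 */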
	swapgs
	cld
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq	$-1		/* pt_regs->orig_ax */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/*
	 * Return back to user mode.  We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                              |
	 * | original Return RSP                                      |
	 * | original RFLAGS                                          |
	 * | original CS                                              |
	 * | original RIP                                             |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                     |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                 |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame         |
	 * | iret Return RSP  } on each loop iteration; overwritten   |
	 * | iret RFLAGS      } by a nested NMI to force another      |
	 * | iret CS          } iteration if needed.                  |
	 * | iret RIP         }                                       |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;        |
	 * | outermost Return RSP  } will not be changed before       |
	 * | outermost RFLAGS      } NMI processing is done.          |
	 * | outermost CS          } Copied to "iret" frame on each   |
	 * | outermost RIP         } iteration.                       |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                  |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */
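	/*
	 * For reference, the NMI-specific area above pt_regs is 17 words
	 * (136 bytes): the 5-word "original" frame, the rdx temp slot, the
	 * "NMI executing" word, and the 5-word "iret" and "outermost"
	 * frames.  The offsets used below (11*8 in first_nmi, 10*8 and
	 * -6*8 in repeat_nmi, 6*8 and 5*8 in nmi_restore) all index into
	 * this layout relative to the current RSP.
	 */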

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call exc_nmi() anyway, so we can just
	 * resume the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
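	/*
	 * At this point only the hardware frame and the saved %rdx are on
	 * this stack, so 6*8(%rsp) is the top of the NMI IST stack and
	 * 4*8(%rsp) holds the interrupted RSP; an interrupted RSP between
	 * (top - EXCEPTION_STKSZ) and top means we were running on the NMI
	 * stack.  DF is bit 10 of RFLAGS, so testing X86_EFLAGS_DF >> 8
	 * against byte 1 of the saved RFLAGS (offset 3*8 + 1) checks it
	 * with a single testb.
	 */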
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
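	/*
	 * The pushes below build an iret frame that simply returns to the
	 * 1: label, so the only effect of the iretq is to unmask NMIs
	 * (the CPU keeps NMIs masked until an IRET is executed).
	 */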
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here.  But NMIs are still enabled and we can take another
	 * nested NMI.  The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call exc_nmi().  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled.  An NMI should not be setting
	 * NEED_RESCHED or anything that normal interrupts and exceptions
	 * might do.
	 */
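	/*
	 * paranoid_entry builds the rest of pt_regs, stashes the
	 * interrupted CR3 in %r14 and the GSBASE state in %rbx, and
	 * switches to kernel CR3/GSBASE; both are restored by hand below
	 * instead of going through paranoid_exit.
	 */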
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	/*
	 * The above invocation of paranoid_entry stored the GSBASE
	 * related information in R/EBX depending on the availability
	 * of FSGSBASE.
	 *
	 * If FSGSBASE is enabled, restore the saved GSBASE value
	 * unconditionally, otherwise take the conditional SWAPGS path.
	 */
	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE

	wrgsbase	%rbx
	jmp	nmi_restore

nmi_no_fsgsbase:
	/* EBX == 0 -> invoke SWAPGS */
	testl	%ebx, %ebx
	jnz	nmi_restore

nmi_swapgs:
	swapgs

nmi_restore:
	POP_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the
	 * "iret" frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
SYM_CODE_END(asm_exc_nmi)

#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code.  There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
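/*
 * ignore_sysret is what syscall_init() installs as the 32-bit SYSCALL
 * target (MSR_CSTAR) in this configuration, so a stray 32-bit SYSCALL
 * simply returns -ENOSYS via sysretl.
 */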
SYM_CODE_START(ignore_sysret)
	UNWIND_HINT_EMPTY
	ENDBR
	mov	$-ENOSYS, %eax
	sysretl
SYM_CODE_END(ignore_sysret)
#endif

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_and_make_dead)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS

	call	make_task_dead
SYM_CODE_END(rewind_stack_and_make_dead)
.popsection