/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/percpu.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2
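	/*
	 * The three pushes above leave the stack laid out as:
	 *
	 *   0*WORD_SIZE(%_ASM_SP): @regs
	 *   1*WORD_SIZE(%_ASM_SP): @flags
	 *   2*WORD_SIZE(%_ASM_SP): @vmx
	 *
	 * The SPEC_CTRL code and the VM-Exit path below rely on these offsets.
	 */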
	/* Copy @flags to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL

	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
	movl VMX_spec_ctrl(%_ASM_DI), %edi
	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
	cmp %edi, %esi
	je .Lspec_ctrl_done
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	mov %edi, %eax
	wrmsr

.Lspec_ctrl_done:

	/*
	 * Since vmentry is serializing on affected CPUs, there's no need for
	 * an LFENCE to stop speculation from skipping the wrmsr.
	 */
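	/*
	 * The SPEC_CTRL sequence above is roughly the equivalent of:
	 *
	 *	if (vmx->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
	 *		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
	 *
	 * open coded so that no RET or indirect branch sits between it and
	 * VM-Enter.
	 */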
	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	testb $VMX_RUN_VMRESUME, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Check EFLAGS.ZF from 'testb' above */
	jz .Lvmlaunch

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
	 * So this isn't a typical function and objtool needs to be told to
	 * save the unwind state here and restore it below.
	 */
	UNWIND_HINT_SAVE

/*
 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
 * the 'vmx_vmexit' label below.
 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail

	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
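/*
 * If VMRESUME/VMLAUNCH itself faults, e.g. because VMX has been disabled by
 * an emergency reboot, the exception table entries above route the fault to
 * .Lfixup, which reports VM-Fail if kvm_rebooting is set and otherwise treats
 * the fault as a fatal bug (ud2).
 */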
SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
	UNWIND_HINT_RESTORE
	ENDBR

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP

	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled
	 * before the first unbalanced RET.
	 */
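	/*
	 * FILL_RETURN_BUFFER stuffs the RSB with benign call targets; the
	 * sequence is patched in only when X86_FEATURE_RSB_VMEXIT is set and
	 * clobbers just the scratch register it's given (RCX here, which was
	 * zeroed above anyway).
	 */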
	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */

	call vmx_spec_ctrl_restore_host

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

.Lfixup:
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif
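	/*
	 * With the frame created above, the stack-passed parameters sit at
	 * fixed offsets from the frame pointer:
	 *
	 *   2*WORD_SIZE(%_ASM_BP): @field
	 *   3*WORD_SIZE(%_ASM_BP): @fault
	 */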
#ifdef CONFIG_X86_64
	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
	push 3*WORD_SIZE(%ebp)
	push 2*WORD_SIZE(%ebp)
#endif

	call vmread_error

#ifndef CONFIG_X86_64
	add $8, %esp
#endif

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	RET
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
	/*
	 * Unconditionally create a stack frame, getting the correct RSP on the
	 * stack (for x86-64) would take two instructions anyways, and RBP can
	 * be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS	/* SS */
	push %rbp		/* RSP */
#endif
	pushf			/* EFLAGS */
	push $__KERNEL_CS	/* CS */
	CALL_NOSPEC _ASM_ARG1	/* the CALL itself pushes the return RIP */

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)