/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/percpu.h>
#include <asm/segment.h>
#include "kvm-asm-offsets.h"
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.macro VMX_DO_EVENT_IRQOFF call_insn call_target
	/*
	 * Unconditionally create a stack frame, getting the correct RSP on the
	 * stack (for x86-64) would take two instructions anyways, and RBP can
	 * be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS
	push %rbp
#endif
	pushf
	push $__KERNEL_CS
	\call_insn \call_target

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	RET
.endm

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @flags to EBX, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3L, %ebx

	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL

	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
	movl VMX_spec_ctrl(%_ASM_DI), %edi
	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
	cmp %edi, %esi
	je .Lspec_ctrl_done
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	mov %edi, %eax
	wrmsr

.Lspec_ctrl_done:

	/*
	 * Since vmentry is serializing on affected CPUs, there's no need for
	 * an LFENCE to stop speculation from skipping the wrmsr.
	 */

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	bt   $VMX_RUN_VMRESUME_SHIFT, %ebx

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Clobbers EFLAGS.ZF */
	CLEAR_CPU_BUFFERS

	/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
	jnc .Lvmlaunch

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
	 * So this isn't a typical function and objtool needs to be told to
	 * save the unwind state here and restore it below.
	 */
	UNWIND_HINT_SAVE

/*
 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
 * the 'vmx_vmexit' label below.
 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail

	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)

SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)

	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
	UNWIND_HINT_RESTORE
	ENDBR

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop           VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/* Discard @regs.  The register is irrelevant, it just can't be RBX. */
	pop %_ASM_AX

	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled, and a
	 * single call to retire, before the first unbalanced RET.
	 */

	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
			   X86_FEATURE_RSB_VMEXIT_LITE

	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */

	call vmx_spec_ctrl_restore_host

	CLEAR_BRANCH_HISTORY_VMEXIT

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

.Lfixup:
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)

SYM_FUNC_START(vmx_do_nmi_irqoff)
	VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
SYM_FUNC_END(vmx_do_nmi_irqoff)

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif

	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
	mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1

	call vmread_error_trampoline2

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	RET
SYM_FUNC_END(vmread_error_trampoline)
#endif

.section .text, "ax"

SYM_FUNC_START(vmx_do_interrupt_irqoff)
	VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
SYM_FUNC_END(vmx_do_interrupt_irqoff)