xref: /openbmc/linux/arch/x86/kvm/vmx/vmenter.S (revision 9756bba2)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
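/*
 * A worked example, assuming the usual __VCPU_REGS_* numbering from
 * asm/kvm_vcpu_regs.h: with WORD_SIZE == 8 on x86_64, VCPU_RDX expands to
 * __VCPU_REGS_RDX * 8, so VCPU_RDX(%_ASM_AX) addresses regs[__VCPU_REGS_RDX]
 * when _ASM_AX holds the @regs array passed to __vmx_vcpu_run below.
 */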

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
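/*
 * A rough sketch of the C-side prototype this entry point is assumed to
 * match (the authoritative declaration lives in the VMX headers):
 *
 *	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *			    unsigned int flags);
 *
 * A zero return means the guest ran and hit a normal VM-Exit; a non-zero
 * return means VMLAUNCH/VMRESUME itself failed (VM-Fail).
 */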
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX
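	/*
	 * RBX is callee-saved and was preserved above; below it is reused as
	 * scratch for @flags and, after VM-Exit, to carry the return value
	 * across the call to vmx_spec_ctrl_restore_host().
	 */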

	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @flags to BL; _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

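	/*
	 * Pass the current RSP, which points at the saved @regs slot, as the
	 * host_rsp argument.  vmx_update_host_rsp() (roughly
	 * void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp))
	 * is expected to VMWRITE the value into the VMCS HOST_RSP field when
	 * it changes, so hardware restores exactly this stack pointer on
	 * VM-Exit.
	 */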
	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	testb $VMX_RUN_VMRESUME, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Check EFLAGS.ZF from 'testb' above */
	jz .Lvmlaunch
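	/*
	 * ZF is set iff VMX_RUN_VMRESUME was clear in @flags, in which case
	 * the VMLAUNCH path is taken; otherwise execution falls through to
	 * the VMRESUME path below.
	 */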

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
	 * So this isn't a typical function and objtool needs to be told to
	 * save the unwind state here and restore it below.
	 */
	UNWIND_HINT_SAVE

/*
 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
 * the 'vmx_vmexit' label below.
 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail

	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)

SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
	UNWIND_HINT_RESTORE
	ENDBR
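	/*
	 * On VM-Exit the CPU loads RSP and RIP from the VMCS HOST_RSP and
	 * HOST_RIP fields, which is why execution resumes here with RSP set
	 * to the value recorded by vmx_update_host_rsp(), i.e. pointing at
	 * the saved @regs slot.
	 */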

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop           VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP

	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled
	 * before the first unbalanced RET.
	 */

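	/*
	 * FILL_RETURN_BUFFER emits the RSB-stuffing sequence only when the
	 * X86_FEATURE_RSB_VMEXIT alternative is enabled, and clobbers only
	 * the scratch register passed in (RCX here, already zeroed above).
	 */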
	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */

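	/*
	 * Restore the host's IA32_SPEC_CTRL and, if VMX_RUN_SAVE_SPEC_CTRL is
	 * set in @flags, save the guest's value into vmx->spec_ctrl first.
	 * Assumed C prototype, as a sketch:
	 *	void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
	 */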
	call vmx_spec_ctrl_restore_host

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

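/*
 * A fault on VMLAUNCH/VMRESUME lands here via the exception table entries
 * above.  If kvm_rebooting is set, the fault is expected (VMX is being torn
 * down for reboot/kexec) and is folded into the VM-Fail path; otherwise
 * something is seriously wrong, so BUG via ud2.
 */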
.Lfixup:
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
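/*
 * Assumed C-side target of this trampoline, as a sketch (the real
 * declaration lives in the VMX headers):
 *
 *	void vmread_error(unsigned long field, bool fault);
 *
 * On 32-bit the arguments are passed on the stack (asmlinkage-style), which
 * is why the trampoline re-pushes them below.
 */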
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

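	/*
	 * Stack layout relative to the new frame pointer:
	 * 1*WORD_SIZE(%_ASM_BP) holds the return address,
	 * 2*WORD_SIZE(%_ASM_BP) holds @field and 3*WORD_SIZE(%_ASM_BP) holds
	 * @fault, both pushed by the inline-asm caller before the CALL.
	 */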
	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif
#ifdef CONFIG_X86_64
	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
	push 3*WORD_SIZE(%ebp)
	push 2*WORD_SIZE(%ebp)
#endif

	call vmread_error

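	/*
	 * On 32-bit, discard the two arguments that were re-pushed for the
	 * call above (2 * 4 bytes); the 64-bit path passed them in registers.
	 */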
#ifndef CONFIG_X86_64
	add $8, %esp
#endif

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	RET
SYM_FUNC_END(vmread_error_trampoline)

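/*
 * vmx_do_interrupt_nmi_irqoff - invoke an IRQ/NMI handler as if the vector
 * had arrived via the IDT.  A sketch of the assumed C-side prototype:
 *
 *	void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 *
 * where @entry is the address of the kernel interrupt/NMI entry point for
 * the vector that was pending at VM-Exit.
 */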
SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
	/*
	 * Unconditionally create a stack frame.  Getting the correct RSP on
	 * the stack (for x86-64) would take two instructions anyway, and RBP
	 * can be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS
	push %rbp
#endif
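	/*
	 * RFLAGS and CS pushed below, plus the return address pushed by
	 * CALL_NOSPEC, complete the synthetic IRET frame the handler will
	 * unwind; on x86-64 the frame also includes the SS and RSP values
	 * pushed above.
	 */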
	pushf
	push $__KERNEL_CS
	CALL_NOSPEC _ASM_ARG1

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)