xref: /openbmc/linux/arch/x86/kvm/vmx/vmenter.S (revision 3213486f)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

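/* The size of a general purpose register, in bytes. */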
#define WORD_SIZE (BITS_PER_LONG / 8)

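/*
 * Byte offsets of the guest's GPRs within the 'regs' array passed to
 * __vmx_vcpu_run(), derived from the register indices in kvm_vcpu_regs.h.
 */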
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

	.text

/**
 * vmx_vmenter - VM-Enter the currently loaded VMCS
 *
 * %RFLAGS.ZF:	!VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if
 * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
 * to vmx_vmexit.
 */
ENTRY(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f

1:	vmresume
	ret

2:	vmlaunch
	ret

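	/*
	 * VMLAUNCH/VMRESUME faulted, which is expected only if VMX has been
	 * disabled because KVM is rebooting; any other fault is spurious.
	 */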
3:	cmpb $0, kvm_rebooting
	jne 4f
	call kvm_spurious_fault
4:	ret

	.pushsection .fixup, "ax"
5:	jmp 3b
	.popsection

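	/* Route faults on VMRESUME/VMLAUNCH above to the fixup code at 5:. */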
	_ASM_EXTABLE(1b, 5b)
	_ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
ENTRY(vmx_vmexit)
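	/*
	 * HOST_RSP was programmed to look as if __vmx_vcpu_run's CALL to
	 * vmx_vmenter just occurred, so this RET "returns" from vmx_vmenter.
	 */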
	ret
ENDPROC(vmx_vmexit)

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @launched:	%true if the VMCS has been launched
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
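/*
 * The matching C-side declaration is, in effect (assumed from the
 * arguments and return value documented above):
 *	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *			    bool launched);
 */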
ENTRY(__vmx_vcpu_run)
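	/*
	 * Save the callee-saved registers; they hold host values and will
	 * be clobbered when the guest's registers are loaded.
	 */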
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @launched to BL; _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp
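	/*
	 * vmx_update_host_rsp() caches the value and writes VMCS.HOST_RSP
	 * only when it changes, avoiding a VMWRITE on every VM-Enter.
	 */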

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	cmpb $0, %bl
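	/*
	 * The resulting ZF is consumed by vmx_vmenter() to select VMLAUNCH
	 * vs. VMRESUME; flags must survive the register loads below.
	 */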

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the vmx_vcpu pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	call vmx_vmenter

	/*
	 * Jump on VM-Fail, i.e. if either CF (VM-Fail Invalid) or
	 * ZF (VM-Fail Valid) is set.
	 */
	jbe 2f

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %eax, %eax

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to indicate
	 * VM-Exit vs. VM-Fail.
	 */
1:	xor %ebx, %ebx
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
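	/* Note: a 32-bit XOR also zeros the upper half of the 64-bit register. */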

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
	jmp 1b
ENDPROC(__vmx_vcpu_run)