/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

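/*
 * The VCPU_* macros below turn the register indices from
 * <asm/kvm_vcpu_regs.h> into byte offsets into the @regs array
 * that is passed to __svm_vcpu_run().
 */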
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

	.text

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the guest VMCB)
 * @regs:	unsigned long * (pointer to the guest GPR array)
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
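	/*
	 * Save the callee-saved registers that are clobbered with guest
	 * values below; the matching pops at the end of the function
	 * restore them before returning to the C caller.
	 */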
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb_pa. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX
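	/*
	 * RAX is free for use as a scratch register here; the guest's RAX
	 * is loaded from the VMCB by VMRUN, not from @regs.
	 */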

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb_pa to RAX. */
	pop %_ASM_AX

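	/*
	 * VMLOAD loads the guest's extra (non-GPR) state from the VMCB,
	 * VMRUN enters the guest and returns here on #VMEXIT, and VMSAVE
	 * writes that extra state back to the VMCB.  Each instruction has
	 * an exception table fixup: a fault is expected, and silently
	 * ignored, if SVM is being disabled due to a reboot (see
	 * kvm_rebooting); any other fault is a fatal KVM bug (UD2).
	 */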
	/* Enter guest mode */
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)

7:
	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers except RAX and RSP, which hardware saves in the VMCB. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

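	/*
	 * Restore the callee-saved registers pushed at entry, in reverse
	 * order of the pushes.
	 */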
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)
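
/*
 * For reference, the matching C prototype (a sketch inferred from the
 * kernel-doc above; the actual declaration lives in KVM's SVM C code):
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 */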