xref: /openbmc/linux/arch/x86/kvm/vmx/hyperv.c (revision 662f6815)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/smp.h>

#include "../cpuid.h"
#include "hyperv.h"
#include "nested.h"
#include "vmcs.h"
#include "vmx.h"
#include "trace.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR		= 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX			= 0x00000812,
 *	PML_ADDRESS			= 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL		= 0x00002814,
 */
#define EVMCS1_SUPPORTED_PINCTRL					\
	(PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |				\
	 PIN_BASED_EXT_INTR_MASK |					\
	 PIN_BASED_NMI_EXITING |					\
	 PIN_BASED_VIRTUAL_NMIS)

#define EVMCS1_SUPPORTED_EXEC_CTRL					\
	(CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |				\
	 CPU_BASED_HLT_EXITING |					\
	 CPU_BASED_CR3_LOAD_EXITING |					\
	 CPU_BASED_CR3_STORE_EXITING |					\
	 CPU_BASED_UNCOND_IO_EXITING |					\
	 CPU_BASED_MOV_DR_EXITING |					\
	 CPU_BASED_USE_TSC_OFFSETTING |					\
	 CPU_BASED_MWAIT_EXITING |					\
	 CPU_BASED_MONITOR_EXITING |					\
	 CPU_BASED_INVLPG_EXITING |					\
	 CPU_BASED_RDPMC_EXITING |					\
	 CPU_BASED_INTR_WINDOW_EXITING |				\
	 CPU_BASED_CR8_LOAD_EXITING |					\
	 CPU_BASED_CR8_STORE_EXITING |					\
	 CPU_BASED_RDTSC_EXITING |					\
	 CPU_BASED_TPR_SHADOW |						\
	 CPU_BASED_USE_IO_BITMAPS |					\
	 CPU_BASED_MONITOR_TRAP_FLAG |					\
	 CPU_BASED_USE_MSR_BITMAPS |					\
	 CPU_BASED_NMI_WINDOW_EXITING |					\
	 CPU_BASED_PAUSE_EXITING |					\
	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)

#define EVMCS1_SUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |			\
	 SECONDARY_EXEC_WBINVD_EXITING |				\
	 SECONDARY_EXEC_ENABLE_VPID |					\
	 SECONDARY_EXEC_ENABLE_EPT |					\
	 SECONDARY_EXEC_UNRESTRICTED_GUEST |				\
	 SECONDARY_EXEC_DESC |						\
	 SECONDARY_EXEC_ENABLE_RDTSCP |					\
	 SECONDARY_EXEC_ENABLE_INVPCID |				\
	 SECONDARY_EXEC_ENABLE_XSAVES |					\
	 SECONDARY_EXEC_RDSEED_EXITING |				\
	 SECONDARY_EXEC_RDRAND_EXITING |				\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |				\
	 SECONDARY_EXEC_PT_USE_GPA |					\
	 SECONDARY_EXEC_PT_CONCEAL_VMX |				\
	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
	 SECONDARY_EXEC_ENCLS_EXITING)

#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)

#define EVMCS1_SUPPORTED_VMEXIT_CTRL					\
	(VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |				\
	 VM_EXIT_SAVE_DEBUG_CONTROLS |					\
	 VM_EXIT_ACK_INTR_ON_EXIT |					\
	 VM_EXIT_HOST_ADDR_SPACE_SIZE |					\
	 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_IA32_PAT |					\
	 VM_EXIT_LOAD_IA32_PAT |					\
	 VM_EXIT_SAVE_IA32_EFER |					\
	 VM_EXIT_LOAD_IA32_EFER |					\
	 VM_EXIT_CLEAR_BNDCFGS |					\
	 VM_EXIT_PT_CONCEAL_PIP |					\
	 VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define EVMCS1_SUPPORTED_VMENTRY_CTRL					\
	(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |				\
	 VM_ENTRY_LOAD_DEBUG_CONTROLS |					\
	 VM_ENTRY_IA32E_MODE |						\
	 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_ENTRY_LOAD_IA32_PAT |					\
	 VM_ENTRY_LOAD_IA32_EFER |					\
	 VM_ENTRY_LOAD_BNDCFGS |					\
	 VM_ENTRY_PT_CONCEAL_PIP |					\
	 VM_ENTRY_LOAD_IA32_RTIT_CTL)

#define EVMCS1_SUPPORTED_VMFUNC (0)

#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
		{EVMCS1_OFFSET(name), clean_field}
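
/*
 * Indexing note (illustrative summary, not authoritative): EVMCS1_FIELD()
 * places each entry at index ROL16(<VMCS field encoding>, 6), i.e. the 16-bit
 * encoding rotated left by six bits, giving every supported field its own
 * slot in the sparse table below. For example, GUEST_RIP (encoding 0x681e)
 * lands at ROL16(0x681e, 6) == 0x079a. Lookups are expected to apply the same
 * rotation before reading the {offset, clean_field} pair.
 */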

const struct evmcs_field vmcs_field_to_evmcs_1[] = {
	/* 64 bit rw */
	EVMCS1_FIELD(GUEST_RIP, guest_rip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(GUEST_RSP, guest_rsp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(GUEST_RFLAGS, guest_rflags,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(HOST_IA32_PAT, host_ia32_pat,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_EFER, host_ia32_efer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR0, host_cr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR3, host_cr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CR4, host_cr4,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_RIP, host_rip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(IO_BITMAP_A, io_bitmap_a,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
	EVMCS1_FIELD(IO_BITMAP_B, io_bitmap_b,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
	EVMCS1_FIELD(MSR_BITMAP, msr_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP),
	EVMCS1_FIELD(GUEST_ES_BASE, guest_es_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_BASE, guest_cs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_BASE, guest_ss_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_BASE, guest_ds_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_BASE, guest_fs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_BASE, guest_gs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_BASE, guest_ldtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_BASE, guest_tr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GDTR_BASE, guest_gdtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_IDTR_BASE, guest_idtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(TSC_OFFSET, tsc_offset,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(VMCS_LINK_POINTER, vmcs_link_pointer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_PAT, guest_ia32_pat,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_EFER, guest_ia32_efer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR0, guest_pdptr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR1, guest_pdptr1,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR2, guest_pdptr2,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PDPTR3, guest_pdptr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR0_READ_SHADOW, cr0_read_shadow,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(CR4_READ_SHADOW, cr4_read_shadow,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR0, guest_cr0,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR3, guest_cr3,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_CR4, guest_cr4,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(GUEST_DR7, guest_dr7,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
	EVMCS1_FIELD(HOST_FS_BASE, host_fs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_GS_BASE, host_gs_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_TR_BASE, host_tr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_GDTR_BASE, host_gdtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_IDTR_BASE, host_idtr_base,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(HOST_RSP, host_rsp,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
	EVMCS1_FIELD(EPT_POINTER, ept_pointer,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
	EVMCS1_FIELD(GUEST_BNDCFGS, guest_bndcfgs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(XSS_EXIT_BITMAP, xss_exit_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(ENCLS_EXITING_BITMAP, encls_exiting_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	EVMCS1_FIELD(TSC_MULTIPLIER, tsc_multiplier,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
	/*
	 * Not used by KVM:
	 *
	 * EVMCS1_FIELD(0x00006828, guest_ia32_s_cet,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x0000682A, guest_ssp,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	 * EVMCS1_FIELD(0x0000682C, guest_ia32_int_ssp_table_addr,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x00002816, guest_ia32_lbr_ctl,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	 * EVMCS1_FIELD(0x00006C18, host_ia32_s_cet,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 * EVMCS1_FIELD(0x00006C1A, host_ssp,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 * EVMCS1_FIELD(0x00006C1C, host_ia32_int_ssp_table_addr,
	 *	     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	 */

	/* 64 bit read only */
	EVMCS1_FIELD(GUEST_PHYSICAL_ADDRESS, guest_physical_address,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(EXIT_QUALIFICATION, exit_qualification,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	/*
	 * Not defined in KVM:
	 *
	 * EVMCS1_FIELD(0x00006402, exit_io_instruction_ecx,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006404, exit_io_instruction_esi,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006406, exit_io_instruction_edi,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 * EVMCS1_FIELD(0x00006408, exit_io_instruction_eip,
	 *		HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
	 */
	EVMCS1_FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),

	/*
	 * No mask defined in the spec as Hyper-V doesn't currently support
	 * these. Future proof by resetting the whole clean field mask on
	 * access.
	 */
	EVMCS1_FIELD(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),

	/* 32 bit rw */
	EVMCS1_FIELD(TPR_THRESHOLD, tpr_threshold,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
	EVMCS1_FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC),
	EVMCS1_FIELD(EXCEPTION_BITMAP, exception_bitmap,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN),
	EVMCS1_FIELD(VM_ENTRY_CONTROLS, vm_entry_controls,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY),
	EVMCS1_FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE,
		     vm_entry_exception_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
	EVMCS1_FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(VM_EXIT_CONTROLS, vm_exit_controls,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
	EVMCS1_FIELD(GUEST_ES_LIMIT, guest_es_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_LIMIT, guest_cs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_LIMIT, guest_ss_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_LIMIT, guest_ds_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_LIMIT, guest_fs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_LIMIT, guest_gs_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_LIMIT, guest_tr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_ACTIVITY_STATE, guest_activity_state,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
	EVMCS1_FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),

	/* 32 bit read only */
	EVMCS1_FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_REASON, vm_exit_reason,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
	EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),

	/* No mask defined in the spec (not used) */
	EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(CR3_TARGET_COUNT, cr3_target_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
	EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),

	/* 16 bit rw */
	EVMCS1_FIELD(HOST_ES_SELECTOR, host_es_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_CS_SELECTOR, host_cs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_SS_SELECTOR, host_ss_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_DS_SELECTOR, host_ds_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_FS_SELECTOR, host_fs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_GS_SELECTOR, host_gs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(HOST_TR_SELECTOR, host_tr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
	EVMCS1_FIELD(GUEST_ES_SELECTOR, guest_es_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_CS_SELECTOR, guest_cs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_SS_SELECTOR, guest_ss_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_DS_SELECTOR, guest_ds_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_FS_SELECTOR, guest_fs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_GS_SELECTOR, guest_gs_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(GUEST_TR_SELECTOR, guest_tr_selector,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
	EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id,
		     HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
};
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
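
/*
 * A minimal sketch (illustrative only, not part of the original file) of how
 * a VMCS field can be resolved against the table above. It assumes the
 * ROL16() helper from vmcs.h and the struct evmcs_field layout
 * ({offset, clean_field}) declared in hyperv.h; the real lookup helpers live
 * in the eVMCS/Hyper-V headers.
 */
static inline const struct evmcs_field *evmcs1_find_field(unsigned long field)
{
	unsigned int index = ROL16(field, 6);

	/* Encodings that rotate past the end of the table aren't supported. */
	if (index >= nr_evmcs_1_fields)
		return NULL;

	/* Slots not populated by EVMCS1_FIELD() above stay zero-initialized. */
	return &vmcs_field_to_evmcs_1[index];
}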

u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(kvm_hv_get_assist_page(vcpu)))
		return EVMPTR_INVALID;

	if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
		return EVMPTR_INVALID;

	return hv_vcpu->vp_assist_page.current_nested_vmcs;
}

uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
{
	/*
	 * vmcs_version represents the range of supported Enlightened VMCS
	 * versions: the lower 8 bits hold the minimal version, the higher 8
	 * bits the maximum supported version. KVM supports versions from 1
	 * to KVM_EVMCS_VERSION.
	 *
	 * Note, do not check whether Hyper-V is fully enabled in guest CPUID:
	 * this helper is used to _get_ the vCPU's supported CPUID.
	 */
	if (kvm_cpu_cap_get(X86_FEATURE_VMX) &&
	    (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
		return (KVM_EVMCS_VERSION << 8) | 1;

	return 0;
}
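
/*
 * Illustrative example: assuming KVM_EVMCS_VERSION is 1 (its current
 * definition), nested_get_evmcs_version() returns 0x0101 when eVMCS is
 * usable: maximum supported version 1 in bits 15:8, minimum version 1 in
 * bits 7:0.
 */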

enum evmcs_revision {
	EVMCSv1_LEGACY,
	NR_EVMCS_REVISIONS,
};

enum evmcs_ctrl_type {
	EVMCS_EXIT_CTRLS,
	EVMCS_ENTRY_CTRLS,
	EVMCS_EXEC_CTRL,
	EVMCS_2NDEXEC,
	EVMCS_3RDEXEC,
	EVMCS_PINCTRL,
	EVMCS_VMFUNC,
	NR_EVMCS_CTRLS,
};

static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = {
	[EVMCS_EXIT_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMEXIT_CTRL,
	},
	[EVMCS_ENTRY_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMENTRY_CTRL,
	},
	[EVMCS_EXEC_CTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_EXEC_CTRL,
	},
	[EVMCS_2NDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING,
	},
	[EVMCS_3RDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_3RDEXEC,
	},
	[EVMCS_PINCTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL,
	},
	[EVMCS_VMFUNC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMFUNC,
	},
};

static u32 evmcs_get_supported_ctls(enum evmcs_ctrl_type ctrl_type)
{
	enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY;

	return evmcs_supported_ctrls[ctrl_type][evmcs_rev];
}

static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * PERF_GLOBAL_CTRL has a quirk where some Windows guests may fail to
	 * boot if a PV CPUID feature flag is not also set.  Treat the fields
	 * as unsupported if the flag is not set in guest CPUID.  This should
	 * be called only for guest accesses, and all guest accesses should be
	 * gated on Hyper-V being enabled and initialized.
	 */
	if (WARN_ON_ONCE(!hv_vcpu))
		return false;

	return hv_vcpu->cpuid_cache.nested_ebx & HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
}

void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u32 ctl_low = (u32)*pdata;
	u32 ctl_high = (u32)(*pdata >> 32);
	u32 supported_ctrls;

	/*
	 * Hyper-V 2016 and 2019 try using these features even when eVMCS
	 * is enabled but there are no corresponding fields.
	 */
	switch (msr_index) {
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_EXIT_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_ENTRY_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_EXEC_CTRL);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_PINCTRL);
		break;
	case MSR_IA32_VMX_VMFUNC:
		ctl_low &= evmcs_get_supported_ctls(EVMCS_VMFUNC);
		break;
	}

	*pdata = ctl_low | ((u64)ctl_high << 32);
}
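
/*
 * Illustrative example of the filtering above: for MSR_IA32_VMX_PINBASED_CTLS
 * the high 32 bits advertise the "allowed-1" settings, so masking ctl_high
 * with EVMCS1_SUPPORTED_PINCTRL hides controls such as PIN_BASED_POSTED_INTR
 * from L1, as eVMCSv1 has no POSTED_INTR_NV/POSTED_INTR_DESC_ADDR fields (see
 * the comment at the top of this file).
 */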

static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type,
					   u32 val)
{
	return !(val & ~evmcs_get_supported_ctls(ctrl_type));
}

int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_evmcs_is_valid_controls(EVMCS_PINCTRL,
					       vmcs12->pin_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXEC_CTRL,
					       vmcs12->cpu_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC,
					       vmcs12->secondary_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXIT_CTRLS,
					       vmcs12->vm_exit_controls)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_ENTRY_CTRLS,
					       vmcs12->vm_entry_controls)))
		return -EINVAL;

	/*
	 * VM-Func controls are 64-bit, but KVM currently doesn't support any
	 * controls in bits 63:32, i.e. dropping those bits on the consistency
	 * check is intentional.
	 */
	if (WARN_ON_ONCE(vmcs12->vm_function_control >> 32))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_VMFUNC,
					       vmcs12->vm_function_control)))
		return -EINVAL;

	return 0;
}

#if IS_ENABLED(CONFIG_HYPERV)
DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);

/*
 * KVM on Hyper-V always uses the latest known eVMCSv1 revision. The
 * assumption is: if a feature has corresponding eVMCS fields described and is
 * exposed in the VMX feature MSRs, KVM is free to use it. Warn if KVM
 * encounters a feature that has no corresponding eVMCS field; this likely
 * means that KVM needs to be updated.
 */
#define evmcs_check_vmcs_conf(field, ctrl)					\
	do {									\
		typeof(vmcs_conf->field) unsupported;				\
										\
		unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl;	\
		if (unsupported) {						\
			pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
				     (u64)unsupported);				\
			vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl;		\
		}								\
	}									\
	while (0)
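
/*
 * Illustrative example: if vmcs_conf->cpu_based_2nd_exec_ctrl contains
 * SECONDARY_EXEC_APIC_REGISTER_VIRT, which has no eVMCSv1 field, the macro
 * above warns once ("cpu_based_2nd_exec_ctrl unsupported with eVMCS: 0x...")
 * and clears the bit so that only EVMCS1_SUPPORTED_2NDEXEC controls remain.
 */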

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
	evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
	evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
	evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
	evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
	evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
	evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
}
#endif

int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.enlightened_vmcs_enabled = true;

	if (vmcs_version)
		*vmcs_version = nested_get_evmcs_version(vcpu);

	return 0;
}

bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	if (!hv_vcpu || !evmcs)
		return false;

	if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
		return false;

	return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
}

void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
{
	nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
}
677