/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
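/*
 * For example (values from asm/apicdef.h): APIC_BASE_MSR is 0x800 and the
 * xAPIC MMIO offset of the TPR is 0x80, so X2APIC_MSR(APIC_TASKPRI) ==
 * 0x800 + (0x80 >> 4) == 0x808, the x2APIC TPR MSR.
 */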

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};
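/*
 * Note: vmx_msr_entry is the hardware format for entries in the
 * VM-entry/VM-exit MSR-load/store areas, so val[] can be handed directly
 * to the VMCS; see the msr_autoload and msr_autostore fields of
 * struct vcpu_vmx below.
 */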

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
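/*
 * The fields above index the per-vCPU segment_cache (see struct vcpu_vmx
 * below); its bitmask tracks validity with one bit per (segment, field)
 * pair, computed in vmx.c as (seg * SEG_FIELD_NR + field).
 */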

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
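/*
 * Illustrative usage sketch: exit handlers can switch on the basic exit
 * reason while still testing modifier bits individually, e.g.:
 *
 *	union vmx_exit_reason reason = to_vmx(vcpu)->exit_reason;
 *
 *	if (reason.failed_vmentry)
 *		return ...;
 *	switch (reason.basic) { ... }
 */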

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 (L1) guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1.  */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8		      x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary: SYSCALL #UDs outside of
	 * 64-bit mode or when EFER.SCE=0, so the SYSCALL MSRs only need to be
	 * loaded into hardware while the guest is in 64-bit mode with
	 * EFER.SCE=1.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;
	u32		      msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
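/*
 * Illustrative usage: pass an MSR through to the guest for both reads and
 * writes (callers in vmx.c often use the vmx_{en,dis}able_intercept_for_msr()
 * wrappers directly):
 *
 *	vmx_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW, false);
 */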

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
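/*
 * The invocations above expand to vmx_test_msr_bitmap_{read,write}(), which
 * report whether an access is intercepted (true for MSRs outside both
 * ranges), and vmx_{clear,set}_msr_bitmap_{read,write}(), which update the
 * bitmap via the non-atomic __clear_bit()/__set_bit().
 */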

/*
 * RVI (Requesting Virtual Interrupt) is the low byte of the 16-bit guest
 * interrupt status VMCS field; the high byte holds SVI, the vector being
 * serviced.
 */
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	    \
{									    \
	return vmcs->controls_shadow.lname;				    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return __##lname##_controls_get(vmx->loaded_vmcs);		    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
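/*
 * Each invocation above generates a family of accessors that avoid redundant
 * VMWRITEs by shadowing the current control value, e.g. for "pin":
 *
 *	pin_controls_set(vmx, val);
 *	val = pin_controls_get(vmx);
 *	pin_controls_setbit(vmx, PIN_BASED_EXT_INTR_MASK);
 *	pin_controls_clearbit(vmx, PIN_BASED_EXT_INTR_MASK);
 */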

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |         \
				(1 << VCPU_REGS_RSP) |          \
				(1 << VCPU_EXREG_RFLAGS) |      \
				(1 << VCPU_EXREG_PDPTR) |       \
				(1 << VCPU_EXREG_SEGMENTS) |    \
				(1 << VCPU_EXREG_CR0) |         \
				(1 << VCPU_EXREG_CR3) |         \
				(1 << VCPU_EXREG_CR4) |         \
				(1 << VCPU_EXREG_EXIT_INFO_1) | \
				(1 << VCPU_EXREG_EXIT_INFO_2))
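/*
 * vmx_get_exit_qual() and vmx_get_intr_info() below illustrate the on-demand
 * pattern: the VMCS field is read (and the register marked available) only
 * on first use after a VM-Exit.
 */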

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

/*
 * #PF must be intercepted when shadow paging is in use, or when a guest
 * MAXPHYADDR smaller than the host's must be emulated, since hardware won't
 * generate reserved-bit #PFs for GPA bits above the guest's MAXPHYADDR.
 */
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}
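/*
 * Per the SDM, bits 31:28 of the VM-exit instruction information field hold
 * the second register operand ("reg2") for instructions that have one, e.g.
 * INVEPT/INVVPID and register-operand VMREAD/VMWRITE.
 */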

#endif /* __KVM_X86_VMX_H */