xref: /openbmc/linux/arch/x86/kvm/vmx/nested.h (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
155d2375eSSean Christopherson /* SPDX-License-Identifier: GPL-2.0 */
255d2375eSSean Christopherson #ifndef __KVM_X86_VMX_NESTED_H
355d2375eSSean Christopherson #define __KVM_X86_VMX_NESTED_H
455d2375eSSean Christopherson 
555d2375eSSean Christopherson #include "kvm_cache_regs.h"
655d2375eSSean Christopherson #include "vmcs12.h"
755d2375eSSean Christopherson #include "vmx.h"
855d2375eSSean Christopherson 
/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};
18671ddc70SJim Mattson 
/* Nested VMX entry points implemented in nested.c. */
void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
						     bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
/* Accessors for the virtual (i.e. emulated-for-L1) VMX capability MSRs. */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);
3855d2375eSSean Christopherson 
/* Return the cached in-kernel copy of L1's current vmcs12. */
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}
4355d2375eSSean Christopherson 
/* Return the cached in-kernel copy of L1's shadow VMCS. */
static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
4855d2375eSSean Christopherson 
490f02bd0aSPaolo Bonzini /*
500f02bd0aSPaolo Bonzini  * Note: the same condition is checked against the state provided by userspace
510f02bd0aSPaolo Bonzini  * in vmx_set_nested_state; if it is satisfied, the nested state must include
520f02bd0aSPaolo Bonzini  * the VMCS12.
530f02bd0aSPaolo Bonzini  */
vmx_has_valid_vmcs12(struct kvm_vcpu * vcpu)5455d2375eSSean Christopherson static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
5555d2375eSSean Christopherson {
5655d2375eSSean Christopherson 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5755d2375eSSean Christopherson 
5827849968SVitaly Kuznetsov 	/* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
5927849968SVitaly Kuznetsov 	return vmx->nested.current_vmptr != -1ull ||
6027849968SVitaly Kuznetsov 		vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
6155d2375eSSean Christopherson }
6255d2375eSSean Christopherson 
nested_get_vpid02(struct kvm_vcpu * vcpu)6325d8b843SSean Christopherson static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
6425d8b843SSean Christopherson {
6525d8b843SSean Christopherson 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6625d8b843SSean Christopherson 
6725d8b843SSean Christopherson 	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
6825d8b843SSean Christopherson }
6925d8b843SSean Christopherson 
/* Return vmcs12's EPT pointer (the L1-provided EPTP, a.k.a. EPT12). */
static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}
7555d2375eSSean Christopherson 
/* Has L1 enabled EPT Accessed/Dirty flags in its EPTP? */
static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}
8055d2375eSSean Christopherson 
/*
 * Return the cr0/4 value that a nested guest would read. This is a combination
 * of L1's "real" cr0 used to run the guest (guest_cr0), and the bits shadowed
 * by the L1 hypervisor (cr0_read_shadow).  KVM must emulate CPU behavior as
 * the value+mask loaded into vmcs02 may not match the vmcs12 fields.
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	/* Host-owned bits (mask set) come from the read shadow, the rest from guest_cr0. */
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
nested_read_cr4(struct vmcs12 * fields)9255d2375eSSean Christopherson static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
9355d2375eSSean Christopherson {
9455d2375eSSean Christopherson 	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
9555d2375eSSean Christopherson 		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
9655d2375eSSean Christopherson }
9755d2375eSSean Christopherson 
/* Number of CR3-target values advertised to L1 via the virtual VMX_MISC MSR. */
static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}
10255d2375eSSean Christopherson 
/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
11355d2375eSSean Christopherson 
/* Does the virtual VMX_MISC MSR allow injection with zero instruction length? */
static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}
11855d2375eSSean Christopherson 
/* Is Monitor Trap Flag advertised to L1 in the virtual proc-based controls MSR? */
static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
			CPU_BASED_MONITOR_TRAP_FLAG;
}
12455d2375eSSean Christopherson 
/* Is Shadow VMCS advertised to L1 in the virtual secondary controls MSR? */
static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}
13055d2375eSSean Christopherson 
/* Is @bit set in vmcs12's primary processor-based VM-execution controls? */
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}
13555d2375eSSean Christopherson 
/*
 * Is @bit set in vmcs12's secondary processor-based VM-execution controls?
 * The secondary controls only take effect if the "activate secondary
 * controls" primary control is also set.
 */
static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	if (!(vmcs12->cpu_based_vm_exec_control &
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
		return false;

	return vmcs12->secondary_vm_exec_control & bit;
}
14255d2375eSSean Christopherson 
/* Has L1 enabled the VMX preemption timer in vmcs12's pin-based controls? */
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}
14855d2375eSSean Christopherson 
/* Has L1 enabled NMI exiting in vmcs12's pin-based controls? */
static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}
15355d2375eSSean Christopherson 
/* Has L1 enabled virtual NMIs in vmcs12's pin-based controls? */
static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
15855d2375eSSean Christopherson 
nested_cpu_has_mtf(struct vmcs12 * vmcs12)1595ef8acbdSOliver Upton static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
1605ef8acbdSOliver Upton {
1615ef8acbdSOliver Upton 	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
1625ef8acbdSOliver Upton }
1635ef8acbdSOliver Upton 
nested_cpu_has_ept(struct vmcs12 * vmcs12)16455d2375eSSean Christopherson static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
16555d2375eSSean Christopherson {
16655d2375eSSean Christopherson 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
16755d2375eSSean Christopherson }
16855d2375eSSean Christopherson 
/* Has L1 enabled XSAVES/XRSTORS in vmcs12's secondary controls? */
static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
}
17355d2375eSSean Christopherson 
/* Has L1 enabled Page Modification Logging in vmcs12's secondary controls? */
static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}
17855d2375eSSean Christopherson 
/* Has L1 enabled virtualize-x2APIC mode in vmcs12's secondary controls? */
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}
18355d2375eSSean Christopherson 
/* Has L1 enabled VPID in vmcs12's secondary controls? */
static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}
18855d2375eSSean Christopherson 
/* Has L1 enabled APIC-register virtualization in vmcs12's secondary controls? */
static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}
19355d2375eSSean Christopherson 
/* Has L1 enabled virtual-interrupt delivery in vmcs12's secondary controls? */
static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}
19855d2375eSSean Christopherson 
/* Has L1 enabled posted interrupts in vmcs12's pin-based controls? */
static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}
20355d2375eSSean Christopherson 
/* Has L1 enabled VM Functions in vmcs12's secondary controls? */
static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}
20855d2375eSSean Christopherson 
nested_cpu_has_eptp_switching(struct vmcs12 * vmcs12)20955d2375eSSean Christopherson static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
21055d2375eSSean Christopherson {
21155d2375eSSean Christopherson 	return nested_cpu_has_vmfunc(vmcs12) &&
21255d2375eSSean Christopherson 		(vmcs12->vm_function_control &
21355d2375eSSean Christopherson 		 VMX_VMFUNC_EPTP_SWITCHING);
21455d2375eSSean Christopherson }
21555d2375eSSean Christopherson 
/* Has L1 enabled Shadow VMCS in vmcs12's secondary controls? */
static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}
22055d2375eSSean Christopherson 
/* Does vmcs12 ask to save the preemption-timer value on VM-exit? */
static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
	    VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}
22655d2375eSSean Christopherson 
/* Has L1 asked (in its current vmcs12) to exit on NMIs? */
static inline bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}
231429ab576SSean Christopherson 
/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}
24155d2375eSSean Christopherson 
/* Has L1 enabled ENCLS exiting in vmcs12's secondary controls? */
static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
}
24672add915SSean Christopherson 
/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 *
 * NOTE(review): the single expression below enforces both rules only when
 * fixed0 is a subset of fixed1 (a bit that must be 1 is also allowed to be
 * 1) — presumably guaranteed by the VMX fixed-bit MSR definitions; confirm
 * against the SDM if reusing this helper elsewhere.
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}
25555d2375eSSean Christopherson 
/*
 * Validate a guest CR0 value against the vCPU's virtual CR0 fixed-bit MSRs.
 * When unrestricted guest is both exposed by L0 (secondary_ctls_high) and
 * enabled by L1 (vmcs12), PE and PG may legally be clear, so drop them from
 * the "must be 1" mask before checking.
 */
static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}
26955d2375eSSean Christopherson 
nested_host_cr0_valid(struct kvm_vcpu * vcpu,unsigned long val)270d4069dbeSPaolo Bonzini static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
27155d2375eSSean Christopherson {
27255d2375eSSean Christopherson 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
27355d2375eSSean Christopherson 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
27455d2375eSSean Christopherson 
27555d2375eSSean Christopherson 	return fixed_bits_valid(val, fixed0, fixed1);
27655d2375eSSean Christopherson }
27755d2375eSSean Christopherson 
/*
 * Validate a CR4 value against the vCPU's virtual CR4 fixed-bit MSRs and
 * against KVM's own CR4 reserved/unsupported-bit checks (__kvm_is_valid_cr4).
 */
static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1) &&
	       __kvm_is_valid_cr4(vcpu, val);
}
28655d2375eSSean Christopherson 
/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

/* Nested-virtualization ops vtable, registered with common KVM x86 code. */
extern struct kvm_x86_nested_ops vmx_nested_ops;
29233b22172SPaolo Bonzini 
29355d2375eSSean Christopherson #endif /* __KVM_X86_VMX_NESTED_H */
294