xref: /openbmc/linux/arch/x86/kvm/vmx/nested.h (revision f47baaed4fef3e31baa750cf337fe62bfcaca31f)
155d2375eSSean Christopherson /* SPDX-License-Identifier: GPL-2.0 */
255d2375eSSean Christopherson #ifndef __KVM_X86_VMX_NESTED_H
355d2375eSSean Christopherson #define __KVM_X86_VMX_NESTED_H
455d2375eSSean Christopherson 
555d2375eSSean Christopherson #include "kvm_cache_regs.h"
655d2375eSSean Christopherson #include "vmcs12.h"
755d2375eSSean Christopherson #include "vmx.h"
855d2375eSSean Christopherson 
9671ddc70SJim Mattson /*
10671ddc70SJim Mattson  * Status returned by nested_vmx_enter_non_root_mode():
11671ddc70SJim Mattson  */
12671ddc70SJim Mattson enum nvmx_vmentry_status {
13671ddc70SJim Mattson 	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
14671ddc70SJim Mattson 	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
15671ddc70SJim Mattson 	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
16671ddc70SJim Mattson 	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
17671ddc70SJim Mattson };
18671ddc70SJim Mattson 
1955d2375eSSean Christopherson void vmx_leave_nested(struct kvm_vcpu *vcpu);
20a4443267SVitaly Kuznetsov void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
2155d2375eSSean Christopherson void nested_vmx_hardware_unsetup(void);
2272b0eaa9SSean Christopherson __init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
2372b0eaa9SSean Christopherson 				     int (*exit_handlers[])(struct kvm_vcpu *));
241b84292bSXiaoyao Li void nested_vmx_set_vmcs_shadowing_bitmap(void);
2555d2375eSSean Christopherson void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
26671ddc70SJim Mattson enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
27671ddc70SJim Mattson 						     bool from_vmentry);
28*f47baaedSSean Christopherson bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
2955d2375eSSean Christopherson void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3055d2375eSSean Christopherson 		       u32 exit_intr_info, unsigned long exit_qualification);
313731905eSSean Christopherson void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
3255d2375eSSean Christopherson int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
3355d2375eSSean Christopherson int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
3455d2375eSSean Christopherson int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
35fdb28619SEugene Korenevsky 			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
3603a8871aSOliver Upton void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
3796b100cdSPaolo Bonzini void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
38e71237d3SOliver Upton bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
39e71237d3SOliver Upton 				 int size);
4055d2375eSSean Christopherson 
4155d2375eSSean Christopherson static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
4255d2375eSSean Christopherson {
4355d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.cached_vmcs12;
4455d2375eSSean Christopherson }
4555d2375eSSean Christopherson 
4655d2375eSSean Christopherson static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
4755d2375eSSean Christopherson {
4855d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
4955d2375eSSean Christopherson }
5055d2375eSSean Christopherson 
5155d2375eSSean Christopherson static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
5255d2375eSSean Christopherson {
5355d2375eSSean Christopherson 	struct vcpu_vmx *vmx = to_vmx(vcpu);
5455d2375eSSean Christopherson 
5555d2375eSSean Christopherson 	/*
5655d2375eSSean Christopherson 	 * In case we do two consecutive get/set_nested_state()s while L2 was
5755d2375eSSean Christopherson 	 * running hv_evmcs may end up not being mapped (we map it from
5855d2375eSSean Christopherson 	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
5955d2375eSSean Christopherson 	 * have vmcs12 if it is true.
6055d2375eSSean Christopherson 	 */
6155d2375eSSean Christopherson 	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
6255d2375eSSean Christopherson 		vmx->nested.hv_evmcs;
6355d2375eSSean Christopherson }
6455d2375eSSean Christopherson 
6525d8b843SSean Christopherson static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
6625d8b843SSean Christopherson {
6725d8b843SSean Christopherson 	struct vcpu_vmx *vmx = to_vmx(vcpu);
6825d8b843SSean Christopherson 
6925d8b843SSean Christopherson 	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
7025d8b843SSean Christopherson }
7125d8b843SSean Christopherson 
72ac69dfaaSSean Christopherson static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
7355d2375eSSean Christopherson {
7455d2375eSSean Christopherson 	/* return the page table to be shadowed - in our case, EPT12 */
7555d2375eSSean Christopherson 	return get_vmcs12(vcpu)->ept_pointer;
7655d2375eSSean Christopherson }
7755d2375eSSean Christopherson 
7855d2375eSSean Christopherson static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
7955d2375eSSean Christopherson {
80ac69dfaaSSean Christopherson 	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
8155d2375eSSean Christopherson }
8255d2375eSSean Christopherson 
8355d2375eSSean Christopherson /*
8455d2375eSSean Christopherson  * Return the cr0 value that a nested guest would read. This is a combination
8555d2375eSSean Christopherson  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
8655d2375eSSean Christopherson  * its hypervisor (cr0_read_shadow).
8755d2375eSSean Christopherson  */
8855d2375eSSean Christopherson static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
8955d2375eSSean Christopherson {
9055d2375eSSean Christopherson 	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
9155d2375eSSean Christopherson 		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
9255d2375eSSean Christopherson }
9355d2375eSSean Christopherson static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
9455d2375eSSean Christopherson {
9555d2375eSSean Christopherson 	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
9655d2375eSSean Christopherson 		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
9755d2375eSSean Christopherson }
9855d2375eSSean Christopherson 
9955d2375eSSean Christopherson static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
10055d2375eSSean Christopherson {
10155d2375eSSean Christopherson 	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
10255d2375eSSean Christopherson }
10355d2375eSSean Christopherson 
10455d2375eSSean Christopherson /*
10555d2375eSSean Christopherson  * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
10655d2375eSSean Christopherson  * to modify any valid field of the VMCS, or are the VM-exit
10755d2375eSSean Christopherson  * information fields read-only?
10855d2375eSSean Christopherson  */
10955d2375eSSean Christopherson static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
11055d2375eSSean Christopherson {
11155d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.msrs.misc_low &
11255d2375eSSean Christopherson 		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
11355d2375eSSean Christopherson }
11455d2375eSSean Christopherson 
11555d2375eSSean Christopherson static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
11655d2375eSSean Christopherson {
11755d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
11855d2375eSSean Christopherson }
11955d2375eSSean Christopherson 
12055d2375eSSean Christopherson static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
12155d2375eSSean Christopherson {
12255d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
12355d2375eSSean Christopherson 			CPU_BASED_MONITOR_TRAP_FLAG;
12455d2375eSSean Christopherson }
12555d2375eSSean Christopherson 
12655d2375eSSean Christopherson static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
12755d2375eSSean Christopherson {
12855d2375eSSean Christopherson 	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
12955d2375eSSean Christopherson 		SECONDARY_EXEC_SHADOW_VMCS;
13055d2375eSSean Christopherson }
13155d2375eSSean Christopherson 
13255d2375eSSean Christopherson static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
13355d2375eSSean Christopherson {
13455d2375eSSean Christopherson 	return vmcs12->cpu_based_vm_exec_control & bit;
13555d2375eSSean Christopherson }
13655d2375eSSean Christopherson 
13755d2375eSSean Christopherson static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
13855d2375eSSean Christopherson {
13955d2375eSSean Christopherson 	return (vmcs12->cpu_based_vm_exec_control &
14055d2375eSSean Christopherson 			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
14155d2375eSSean Christopherson 		(vmcs12->secondary_vm_exec_control & bit);
14255d2375eSSean Christopherson }
14355d2375eSSean Christopherson 
14455d2375eSSean Christopherson static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
14555d2375eSSean Christopherson {
14655d2375eSSean Christopherson 	return vmcs12->pin_based_vm_exec_control &
14755d2375eSSean Christopherson 		PIN_BASED_VMX_PREEMPTION_TIMER;
14855d2375eSSean Christopherson }
14955d2375eSSean Christopherson 
15055d2375eSSean Christopherson static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
15155d2375eSSean Christopherson {
15255d2375eSSean Christopherson 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
15355d2375eSSean Christopherson }
15455d2375eSSean Christopherson 
15555d2375eSSean Christopherson static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
15655d2375eSSean Christopherson {
15755d2375eSSean Christopherson 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
15855d2375eSSean Christopherson }
15955d2375eSSean Christopherson 
1605ef8acbdSOliver Upton static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
1615ef8acbdSOliver Upton {
1625ef8acbdSOliver Upton 	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
1635ef8acbdSOliver Upton }
1645ef8acbdSOliver Upton 
16555d2375eSSean Christopherson static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
16655d2375eSSean Christopherson {
16755d2375eSSean Christopherson 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
16855d2375eSSean Christopherson }
16955d2375eSSean Christopherson 
/* True if vmcs12 enables the SECONDARY_EXEC_XSAVES secondary control. */
static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}
17455d2375eSSean Christopherson 
/* True if vmcs12 enables the SECONDARY_EXEC_ENABLE_PML secondary control. */
static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}
17955d2375eSSean Christopherson 
/* True if vmcs12 enables SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE. */
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}
18455d2375eSSean Christopherson 
/* True if vmcs12 enables the SECONDARY_EXEC_ENABLE_VPID secondary control. */
static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}
18955d2375eSSean Christopherson 
/* True if vmcs12 enables SECONDARY_EXEC_APIC_REGISTER_VIRT. */
static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}
19455d2375eSSean Christopherson 
/* True if vmcs12 enables SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY. */
static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}
19955d2375eSSean Christopherson 
20055d2375eSSean Christopherson static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
20155d2375eSSean Christopherson {
20255d2375eSSean Christopherson 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
20355d2375eSSean Christopherson }
20455d2375eSSean Christopherson 
/* True if vmcs12 enables the SECONDARY_EXEC_ENABLE_VMFUNC secondary control. */
static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}
20955d2375eSSean Christopherson 
21055d2375eSSean Christopherson static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
21155d2375eSSean Christopherson {
21255d2375eSSean Christopherson 	return nested_cpu_has_vmfunc(vmcs12) &&
21355d2375eSSean Christopherson 		(vmcs12->vm_function_control &
21455d2375eSSean Christopherson 		 VMX_VMFUNC_EPTP_SWITCHING);
21555d2375eSSean Christopherson }
21655d2375eSSean Christopherson 
/* True if vmcs12 enables the SECONDARY_EXEC_SHADOW_VMCS secondary control. */
static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}
22155d2375eSSean Christopherson 
22255d2375eSSean Christopherson static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
22355d2375eSSean Christopherson {
22455d2375eSSean Christopherson 	return vmcs12->vm_exit_controls &
22555d2375eSSean Christopherson 	    VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
22655d2375eSSean Christopherson }
22755d2375eSSean Christopherson 
22855d2375eSSean Christopherson /*
22955d2375eSSean Christopherson  * In nested virtualization, check if L1 asked to exit on external interrupts.
23055d2375eSSean Christopherson  * For most existing hypervisors, this will always return true.
23155d2375eSSean Christopherson  */
23255d2375eSSean Christopherson static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
23355d2375eSSean Christopherson {
23455d2375eSSean Christopherson 	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
23555d2375eSSean Christopherson 		PIN_BASED_EXT_INTR_MASK;
23655d2375eSSean Christopherson }
23755d2375eSSean Christopherson 
23855d2375eSSean Christopherson /*
23955d2375eSSean Christopherson  * if fixed0[i] == 1: val[i] must be 1
24055d2375eSSean Christopherson  * if fixed1[i] == 0: val[i] must be 0
24155d2375eSSean Christopherson  */
24255d2375eSSean Christopherson static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
24355d2375eSSean Christopherson {
24455d2375eSSean Christopherson 	return ((val & fixed1) | fixed0) == val;
24555d2375eSSean Christopherson }
24655d2375eSSean Christopherson 
247d4069dbeSPaolo Bonzini static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
24855d2375eSSean Christopherson {
24955d2375eSSean Christopherson 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
25055d2375eSSean Christopherson 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
25155d2375eSSean Christopherson 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
25255d2375eSSean Christopherson 
25355d2375eSSean Christopherson 	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
25455d2375eSSean Christopherson 		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
25555d2375eSSean Christopherson 	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
25655d2375eSSean Christopherson 		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
25755d2375eSSean Christopherson 
25855d2375eSSean Christopherson 	return fixed_bits_valid(val, fixed0, fixed1);
25955d2375eSSean Christopherson }
26055d2375eSSean Christopherson 
261d4069dbeSPaolo Bonzini static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
26255d2375eSSean Christopherson {
26355d2375eSSean Christopherson 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
26455d2375eSSean Christopherson 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
26555d2375eSSean Christopherson 
26655d2375eSSean Christopherson 	return fixed_bits_valid(val, fixed0, fixed1);
26755d2375eSSean Christopherson }
26855d2375eSSean Christopherson 
269d4069dbeSPaolo Bonzini static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
27055d2375eSSean Christopherson {
27155d2375eSSean Christopherson 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
27255d2375eSSean Christopherson 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
27355d2375eSSean Christopherson 
27455d2375eSSean Christopherson 	return fixed_bits_valid(val, fixed0, fixed1);
27555d2375eSSean Christopherson }
27655d2375eSSean Christopherson 
27755d2375eSSean Christopherson /* No difference in the restrictions on guest and host CR4 in VMX operation. */
27855d2375eSSean Christopherson #define nested_guest_cr4_valid	nested_cr4_valid
27955d2375eSSean Christopherson #define nested_host_cr4_valid	nested_cr4_valid
28055d2375eSSean Christopherson 
28155d2375eSSean Christopherson #endif /* __KVM_X86_VMX_NESTED_H */
282