// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

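/*
 * Scratch structure used to pass a GFN range through the void *data cookie
 * of the Hyper-V flush-list fill callback.
 */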
struct kvm_hv_tlb_range {
	u64 start_gfn;
	u64 pages;
};

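/*
 * Fill the Hyper-V flush hypercall's list of guest physical addresses with
 * the GFN range stashed in @data by the caller.
 */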
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_hv_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}

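/*
 * Flush the TLB entries associated with a single TDP root, using the ranged
 * flush if the caller provided a GFN range, else flushing the entire address
 * space backed by @root_tdp.
 */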
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_hv_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}

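/*
 * Perform a remote TLB flush for the VM, either for the GFN range in @range
 * or for the entire address space if @range is NULL.  If all vCPUs share a
 * common TDP root, a single flush of that root suffices; otherwise every
 * unique valid root must be flushed individually.
 */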
static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
					struct kvm_hv_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}

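/*
 * Flush remote TLBs for a range of guest physical pages by wrapping the
 * common helper with an on-stack kvm_hv_tlb_range.
 */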
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	struct kvm_hv_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = nr_pages,
	};

	return __hv_flush_remote_tlbs_range(kvm, &range);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);

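/*
 * Flush all remote TLB entries for the VM, i.e. a "full" flush, by passing
 * a NULL range to the common helper.
 */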
int hv_flush_remote_tlbs(struct kvm *kvm)
{
	return __hv_flush_remote_tlbs_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);

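/*
 * Track the TDP root in use by @vcpu so that future remote TLB flushes can
 * target a single common root.  If the new root diverges from the tracked
 * per-VM root, invalidate the latter; __hv_flush_remote_tlbs_range() will
 * re-detect convergence on the next flush.
 */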
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);