xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision fb101293)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
30fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
31fe5db27dSBen Gardon {
32fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
33fe5db27dSBen Gardon 		return;
3402c00b3aSBen Gardon 
3502c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
367cca2d0bSBen Gardon 
377cca2d0bSBen Gardon 	/*
387cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
397cca2d0bSBen Gardon 	 * can run before the VM is torn down.
407cca2d0bSBen Gardon 	 */
417cca2d0bSBen Gardon 	rcu_barrier();
4202c00b3aSBen Gardon }
4302c00b3aSBen Gardon 
442bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
452bdb3d84SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush);
462bdb3d84SBen Gardon 
472bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
48a889ea54SBen Gardon {
492bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
502bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
512bdb3d84SBen Gardon }
522bdb3d84SBen Gardon 
532bdb3d84SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
542bdb3d84SBen Gardon {
552bdb3d84SBen Gardon 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
562bdb3d84SBen Gardon 
572bdb3d84SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
582bdb3d84SBen Gardon 
5911cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
602bdb3d84SBen Gardon 		return;
612bdb3d84SBen Gardon 
622bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
632bdb3d84SBen Gardon 
642bdb3d84SBen Gardon 	list_del(&root->link);
652bdb3d84SBen Gardon 
662bdb3d84SBen Gardon 	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
672bdb3d84SBen Gardon 
682bdb3d84SBen Gardon 	tdp_mmu_free_sp(root);
69a889ea54SBen Gardon }
70a889ea54SBen Gardon 
71cfc10997SBen Gardon /*
72cfc10997SBen Gardon  * Finds the next valid root after prev_root (or the first valid root if
73cfc10997SBen Gardon  * prev_root is NULL), takes a reference on it, and returns that next root.
74cfc10997SBen Gardon  * If prev_root is not NULL, this thread should already hold a reference on
75cfc10997SBen Gardon  * it, and that reference will be dropped. If no valid root is found, this
76cfc10997SBen Gardon  * function will return NULL.
77cfc10997SBen Gardon  */
78cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
79cfc10997SBen Gardon 					      struct kvm_mmu_page *prev_root)
80a889ea54SBen Gardon {
81a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
82a889ea54SBen Gardon 
83cfc10997SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
84cfc10997SBen Gardon 
85cfc10997SBen Gardon 	if (prev_root)
86cfc10997SBen Gardon 		next_root = list_next_entry(prev_root, link);
87cfc10997SBen Gardon 	else
88cfc10997SBen Gardon 		next_root = list_first_entry(&kvm->arch.tdp_mmu_roots,
89cfc10997SBen Gardon 					     typeof(*next_root), link);
90cfc10997SBen Gardon 
91*fb101293SBen Gardon 	while (!list_entry_is_head(next_root, &kvm->arch.tdp_mmu_roots, link) &&
92*fb101293SBen Gardon 	       !kvm_tdp_mmu_get_root(kvm, next_root))
93*fb101293SBen Gardon 		next_root = list_next_entry(next_root, link);
94*fb101293SBen Gardon 
95cfc10997SBen Gardon 	if (list_entry_is_head(next_root, &kvm->arch.tdp_mmu_roots, link))
96cfc10997SBen Gardon 		next_root = NULL;
97cfc10997SBen Gardon 
98cfc10997SBen Gardon 	if (prev_root)
99cfc10997SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root);
100cfc10997SBen Gardon 
101a889ea54SBen Gardon 	return next_root;
102a889ea54SBen Gardon }
103a889ea54SBen Gardon 
104a889ea54SBen Gardon /*
105a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
106a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
107a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
108a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
109a889ea54SBen Gardon  */
110a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
111cfc10997SBen Gardon 	for (_root = tdp_mmu_next_root(_kvm, NULL);		\
112cfc10997SBen Gardon 	     _root;						\
113a3f15bdaSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root))		\
114a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {	\
115a3f15bdaSSean Christopherson 		} else
116a889ea54SBen Gardon 
117a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
118a3f15bdaSSean Christopherson 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
119a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
120a3f15bdaSSean Christopherson 		} else
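
/*
 * Usage sketch (hypothetical caller, mirroring __kvm_tdp_mmu_zap_gfn_range()
 * and kvm_tdp_mmu_zap_all() below): the yield-safe iterator takes a reference
 * on each root before the loop body runs and drops it when advancing, so the
 * body is free to yield the MMU lock:
 *
 *	bool flush = false;
 *	struct kvm_mmu_page *root;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 *
 * Breaking out of the loop early leaves the caller holding a reference on the
 * current root, per the note above.
 */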
12102c00b3aSBen Gardon 
12202c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
12302c00b3aSBen Gardon 						   int level)
12402c00b3aSBen Gardon {
12502c00b3aSBen Gardon 	union kvm_mmu_page_role role;
12602c00b3aSBen Gardon 
12702c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
12802c00b3aSBen Gardon 	role.level = level;
12902c00b3aSBen Gardon 	role.direct = true;
13002c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
13102c00b3aSBen Gardon 	role.access = ACC_ALL;
13202c00b3aSBen Gardon 
13302c00b3aSBen Gardon 	return role;
13402c00b3aSBen Gardon }
13502c00b3aSBen Gardon 
13602c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
13702c00b3aSBen Gardon 					       int level)
13802c00b3aSBen Gardon {
13902c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
14002c00b3aSBen Gardon 
14102c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
14202c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
14302c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
14402c00b3aSBen Gardon 
14502c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
14602c00b3aSBen Gardon 	sp->gfn = gfn;
14702c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
14802c00b3aSBen Gardon 
14933dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
15033dd3574SBen Gardon 
15102c00b3aSBen Gardon 	return sp;
15202c00b3aSBen Gardon }
15302c00b3aSBen Gardon 
1546e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
15502c00b3aSBen Gardon {
15602c00b3aSBen Gardon 	union kvm_mmu_page_role role;
15702c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
15802c00b3aSBen Gardon 	struct kvm_mmu_page *root;
15902c00b3aSBen Gardon 
1606e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
16102c00b3aSBen Gardon 
1626e6ec584SSean Christopherson 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
16302c00b3aSBen Gardon 
16402c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
165a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
166*fb101293SBen Gardon 		if (root->role.word == role.word &&
167*fb101293SBen Gardon 		    kvm_tdp_mmu_get_root(kvm, root))
1686e6ec584SSean Christopherson 			goto out;
16902c00b3aSBen Gardon 	}
17002c00b3aSBen Gardon 
17102c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
17211cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
17302c00b3aSBen Gardon 
17402c00b3aSBen Gardon 	list_add(&root->link, &kvm->arch.tdp_mmu_roots);
17502c00b3aSBen Gardon 
1766e6ec584SSean Christopherson out:
17702c00b3aSBen Gardon 	return __pa(root->spt);
178fe5db27dSBen Gardon }
1792f2fad08SBen Gardon 
1807cca2d0bSBen Gardon /*
1817cca2d0bSBen Gardon  * This is called through call_rcu in order to free TDP page table memory
1827cca2d0bSBen Gardon  * safely with respect to other kernel threads that may be operating on
1837cca2d0bSBen Gardon  * the memory.
1847cca2d0bSBen Gardon  * Because TDP MMU page table memory is only accessed inside RCU read-side
1857cca2d0bSBen Gardon  * critical sections and is freed only after a grace period, lockless
1867cca2d0bSBen Gardon  * walkers are guaranteed never to use the memory after it has been freed.
1877cca2d0bSBen Gardon  */
1887cca2d0bSBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
1897cca2d0bSBen Gardon {
1907cca2d0bSBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
1917cca2d0bSBen Gardon 					       rcu_head);
1927cca2d0bSBen Gardon 
1937cca2d0bSBen Gardon 	tdp_mmu_free_sp(sp);
1947cca2d0bSBen Gardon }
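
/*
 * Sketch of the resulting protocol (illustrative, mirroring code elsewhere in
 * this file): lockless walkers bracket their SPTE accesses with
 * rcu_read_lock()/rcu_read_unlock() and read pointers via rcu_dereference(),
 * while the zapping side disconnects the page table and defers the free past
 * an RCU grace period:
 *
 *	rcu_read_lock();
 *	... walk SPTEs via rcu_dereference(iter.sptep) ...
 *	rcu_read_unlock();
 *
 *	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
 */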
1957cca2d0bSBen Gardon 
1962f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1979a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
1989a77daacSBen Gardon 				bool shared);
1992f2fad08SBen Gardon 
200f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
201f8e14497SBen Gardon {
202f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
203f8e14497SBen Gardon 		return;
204f8e14497SBen Gardon 
205f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
20664bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
20764bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
208f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
209f8e14497SBen Gardon }
210f8e14497SBen Gardon 
211a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
212a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
213a6a0b05dSBen Gardon {
214a6a0b05dSBen Gardon 	bool pfn_changed;
215a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
216a6a0b05dSBen Gardon 
217a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
218a6a0b05dSBen Gardon 		return;
219a6a0b05dSBen Gardon 
220a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
221a6a0b05dSBen Gardon 
222a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
223a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
224a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
225fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
226a6a0b05dSBen Gardon 	}
227a6a0b05dSBen Gardon }
228a6a0b05dSBen Gardon 
2292f2fad08SBen Gardon /**
230a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
231a9442f59SBen Gardon  *
232a9442f59SBen Gardon  * @kvm: kvm instance
233a9442f59SBen Gardon  * @sp: the new page
2349a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2359a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2369a77daacSBen Gardon  *	    threads that might be adding or removing pages.
237a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
238a9442f59SBen Gardon  *		eventual reclaim.
239a9442f59SBen Gardon  */
240a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2419a77daacSBen Gardon 			      bool shared, bool account_nx)
242a9442f59SBen Gardon {
2439a77daacSBen Gardon 	if (shared)
2449a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2459a77daacSBen Gardon 	else
246a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
247a9442f59SBen Gardon 
248a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
249a9442f59SBen Gardon 	if (account_nx)
250a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2519a77daacSBen Gardon 
2529a77daacSBen Gardon 	if (shared)
2539a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
254a9442f59SBen Gardon }
255a9442f59SBen Gardon 
256a9442f59SBen Gardon /**
257a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
258a9442f59SBen Gardon  *
259a9442f59SBen Gardon  * @kvm: kvm instance
260a9442f59SBen Gardon  * @sp: the page to be removed
2619a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2629a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2639a77daacSBen Gardon  *	    threads that might be adding or removing pages.
264a9442f59SBen Gardon  */
2659a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2669a77daacSBen Gardon 				bool shared)
267a9442f59SBen Gardon {
2689a77daacSBen Gardon 	if (shared)
2699a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2709a77daacSBen Gardon 	else
271a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
272a9442f59SBen Gardon 
273a9442f59SBen Gardon 	list_del(&sp->link);
274a9442f59SBen Gardon 	if (sp->lpage_disallowed)
275a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2769a77daacSBen Gardon 
2779a77daacSBen Gardon 	if (shared)
2789a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
279a9442f59SBen Gardon }
280a9442f59SBen Gardon 
281a9442f59SBen Gardon /**
282a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
283a066e61fSBen Gardon  *
284a066e61fSBen Gardon  * @kvm: kvm instance
285a066e61fSBen Gardon  * @pt: the page removed from the paging structure
2869a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
2879a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
2889a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
289a066e61fSBen Gardon  *
290a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
291a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
29270fb3e41SBen Gardon  *
29370fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
29470fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
29570fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
29670fb3e41SBen Gardon  * early rcu_dereferences in the function.
297a066e61fSBen Gardon  */
29870fb3e41SBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
2999a77daacSBen Gardon 					bool shared)
300a066e61fSBen Gardon {
30170fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
302a066e61fSBen Gardon 	int level = sp->role.level;
303e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
304a066e61fSBen Gardon 	u64 old_child_spte;
3059a77daacSBen Gardon 	u64 *sptep;
306e25f0e0cSBen Gardon 	gfn_t gfn;
307a066e61fSBen Gardon 	int i;
308a066e61fSBen Gardon 
309a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
310a066e61fSBen Gardon 
3119a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
312a066e61fSBen Gardon 
313a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
31470fb3e41SBen Gardon 		sptep = rcu_dereference(pt) + i;
315e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3169a77daacSBen Gardon 
3179a77daacSBen Gardon 		if (shared) {
318e25f0e0cSBen Gardon 			/*
319e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
320e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
321e25f0e0cSBen Gardon 			 * already marked as removed then another thread
322e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so keep
323e25f0e0cSBen Gardon 			 * retrying the exchange until the SPTE transitions
324e25f0e0cSBen Gardon 			 * from some other value to the removed SPTE value.
325e25f0e0cSBen Gardon 			 */
326e25f0e0cSBen Gardon 			for (;;) {
327e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
328e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
329e25f0e0cSBen Gardon 					break;
330e25f0e0cSBen Gardon 				cpu_relax();
331e25f0e0cSBen Gardon 			}
3329a77daacSBen Gardon 		} else {
3338df9f1afSSean Christopherson 			/*
3348df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3358df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3368df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3378df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3388df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3398df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3408df9f1afSSean Christopherson 			 * unreachable.
3418df9f1afSSean Christopherson 			 */
3429a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3438df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3448df9f1afSSean Christopherson 				continue;
345e25f0e0cSBen Gardon 
346e25f0e0cSBen Gardon 			/*
347e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
348e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
349e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
350e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
351e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
352e25f0e0cSBen Gardon 			 * the function.
353e25f0e0cSBen Gardon 			 */
354e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3559a77daacSBen Gardon 		}
356e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
357e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
358e25f0e0cSBen Gardon 				    shared);
359a066e61fSBen Gardon 	}
360a066e61fSBen Gardon 
361a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
362a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
363a066e61fSBen Gardon 
3647cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
365a066e61fSBen Gardon }
366a066e61fSBen Gardon 
367a066e61fSBen Gardon /**
3682f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3692f2fad08SBen Gardon  * @kvm: kvm instance
3702f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3712f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3722f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3732f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3742f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3759a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3769a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3779a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3782f2fad08SBen Gardon  *
3792f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3802f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3812f2fad08SBen Gardon  */
3822f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3839a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3849a77daacSBen Gardon 				  bool shared)
3852f2fad08SBen Gardon {
3862f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3872f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
3882f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
3892f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
3902f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
3912f2fad08SBen Gardon 
3922f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
3932f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
394764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
3952f2fad08SBen Gardon 
3962f2fad08SBen Gardon 	/*
3972f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
3982f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
3992f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
4002f2fad08SBen Gardon 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4012f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4022f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4032f2fad08SBen Gardon 	 * thread before replacement.
4042f2fad08SBen Gardon 	 */
4052f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4062f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4072f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4082f2fad08SBen Gardon 		       "different PFN!\n"
4092f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4102f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4112f2fad08SBen Gardon 
4122f2fad08SBen Gardon 		/*
4132f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4142f2fad08SBen Gardon 		 * corruption.
4152f2fad08SBen Gardon 		 */
4162f2fad08SBen Gardon 		BUG();
4172f2fad08SBen Gardon 	}
4182f2fad08SBen Gardon 
4192f2fad08SBen Gardon 	if (old_spte == new_spte)
4202f2fad08SBen Gardon 		return;
4212f2fad08SBen Gardon 
422b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
423b9a98c34SBen Gardon 
4242f2fad08SBen Gardon 	/*
4252f2fad08SBen Gardon 	 * The only time a SPTE should change from one non-present state to
4262f2fad08SBen Gardon 	 * another is when an MMIO entry is installed, modified or removed.
4272f2fad08SBen Gardon 	 * In that case, there is nothing to do here.
4282f2fad08SBen Gardon 	 */
4292f2fad08SBen Gardon 	if (!was_present && !is_present) {
4302f2fad08SBen Gardon 		/*
43108f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
43208f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
43308f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
43408f07c80SBen Gardon 		 * are nonpresent.
4352f2fad08SBen Gardon 		 */
43608f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
43708f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
43808f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4392f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4402f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4412f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
44208f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
44308f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4442f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4452f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4462f2fad08SBen Gardon 		return;
4472f2fad08SBen Gardon 	}
4482f2fad08SBen Gardon 
4492f2fad08SBen Gardon 
4502f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
45164bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4522f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4532f2fad08SBen Gardon 
4542f2fad08SBen Gardon 	/*
4552f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4562f2fad08SBen Gardon 	 * the paging structure.
4572f2fad08SBen Gardon 	 */
458a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
459a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4609a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4612f2fad08SBen Gardon }
4622f2fad08SBen Gardon 
4632f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4649a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4659a77daacSBen Gardon 				bool shared)
4662f2fad08SBen Gardon {
4679a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4689a77daacSBen Gardon 			      shared);
469f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
470a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
471a6a0b05dSBen Gardon 				      new_spte, level);
4722f2fad08SBen Gardon }
473faaf05b0SBen Gardon 
474fe43fa2fSBen Gardon /*
4759a77daacSBen Gardon  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
4769a77daacSBen Gardon  * associated bookkeeping
4779a77daacSBen Gardon  *
4789a77daacSBen Gardon  * @kvm: kvm instance
4799a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
4809a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
4819a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
4829a77daacSBen Gardon  *	    this function will have no side-effects.
4839a77daacSBen Gardon  */
4849a77daacSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
4859a77daacSBen Gardon 					   struct tdp_iter *iter,
4869a77daacSBen Gardon 					   u64 new_spte)
4879a77daacSBen Gardon {
4889a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
4899a77daacSBen Gardon 
49008f07c80SBen Gardon 	/*
49108f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
49208f07c80SBen Gardon 	 * may modify it.
49308f07c80SBen Gardon 	 */
4947a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
49508f07c80SBen Gardon 		return false;
49608f07c80SBen Gardon 
4979a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
4989a77daacSBen Gardon 		      new_spte) != iter->old_spte)
4999a77daacSBen Gardon 		return false;
5009a77daacSBen Gardon 
50108889894SSean Christopherson 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
50208889894SSean Christopherson 			    new_spte, iter->level, true);
5039a77daacSBen Gardon 
5049a77daacSBen Gardon 	return true;
5059a77daacSBen Gardon }
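
/*
 * Usage sketch (mirroring the fault handling path later in this file):
 * callers treat a false return as having lost the race to another thread and
 * either retry the walk or bail out, e.g.:
 *
 *	if (!tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte))
 *		return RET_PF_RETRY;
 */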
5069a77daacSBen Gardon 
50708f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
50808f07c80SBen Gardon 					   struct tdp_iter *iter)
50908f07c80SBen Gardon {
51008f07c80SBen Gardon 	/*
51108f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
51208f07c80SBen Gardon 	 * non-present value. This will stop other threads from
51308f07c80SBen Gardon 	 * immediately installing a present entry in its place
51408f07c80SBen Gardon 	 * before the TLBs are flushed.
51508f07c80SBen Gardon 	 */
51608f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
51708f07c80SBen Gardon 		return false;
51808f07c80SBen Gardon 
51908f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
52008f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
52108f07c80SBen Gardon 
52208f07c80SBen Gardon 	/*
52308f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
52408f07c80SBen Gardon 	 * must either wait on the MMU lock or use
52508f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overwrite the
52608f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
52708f07c80SBen Gardon 	 * here since the SPTE is going from non-present
52808f07c80SBen Gardon 	 * to non-present.
52908f07c80SBen Gardon 	 */
53014f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
53108f07c80SBen Gardon 
53208f07c80SBen Gardon 	return true;
53308f07c80SBen Gardon }
53408f07c80SBen Gardon 
5359a77daacSBen Gardon 
5369a77daacSBen Gardon /*
537fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
538fe43fa2fSBen Gardon  * @kvm: kvm instance
539fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
540fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
541fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
542fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
543fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
544fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
545fe43fa2fSBen Gardon  *		      double counted.
546fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
547fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
548fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
549fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
550fe43fa2fSBen Gardon  *		      writes from being double counted.
551fe43fa2fSBen Gardon  */
552f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
553a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
554a6a0b05dSBen Gardon 				      bool record_dirty_log)
555faaf05b0SBen Gardon {
556531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5573a9a4aa5SBen Gardon 
55808f07c80SBen Gardon 	/*
55908f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
56008f07c80SBen Gardon 	 * temporary removed SPTE value.
56108f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
56208f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
56308f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
56408f07c80SBen Gardon 	 */
5657a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
56608f07c80SBen Gardon 
5677cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
568faaf05b0SBen Gardon 
56908889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
57008889894SSean Christopherson 			      new_spte, iter->level, false);
571f8e14497SBen Gardon 	if (record_acc_track)
572f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
573f8e14497SBen Gardon 					      iter->level);
574a6a0b05dSBen Gardon 	if (record_dirty_log)
57508889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
576a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
577a6a0b05dSBen Gardon 					      iter->level);
578f8e14497SBen Gardon }
579f8e14497SBen Gardon 
580f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
581f8e14497SBen Gardon 				    u64 new_spte)
582f8e14497SBen Gardon {
583a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
584f8e14497SBen Gardon }
585f8e14497SBen Gardon 
586f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
587f8e14497SBen Gardon 						 struct tdp_iter *iter,
588f8e14497SBen Gardon 						 u64 new_spte)
589f8e14497SBen Gardon {
590a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
591a6a0b05dSBen Gardon }
592a6a0b05dSBen Gardon 
593a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
594a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
595a6a0b05dSBen Gardon 						 u64 new_spte)
596a6a0b05dSBen Gardon {
597a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
598faaf05b0SBen Gardon }
599faaf05b0SBen Gardon 
600faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
601faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
602faaf05b0SBen Gardon 
603f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
604f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
605f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
606f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
607f8e14497SBen Gardon 			continue;					\
608f8e14497SBen Gardon 		else
609f8e14497SBen Gardon 
610bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
611bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
612bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
613bb18842eSBen Gardon 
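/*
 * Usage sketch (hypothetical walker, following the pattern of
 * clear_dirty_gfn_range() below): iterate only the present leaf SPTEs of a
 * root under RCU and rewrite each one, e.g. to write-protect each mapping:
 *
 *	struct tdp_iter iter;
 *
 *	rcu_read_lock();
 *	tdp_root_for_each_leaf_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
 *			continue;
 *		tdp_mmu_set_spte_no_dirty_log(kvm, &iter,
 *					      iter.old_spte & ~PT_WRITABLE_MASK);
 *	}
 *	rcu_read_unlock();
 */
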
614faaf05b0SBen Gardon /*
615e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
616e28a436cSBen Gardon  * to the scheduler.
617e28a436cSBen Gardon  *
618e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
619e139a34eSBen Gardon  * TLB flush before yielding.
620e139a34eSBen Gardon  *
621e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
622ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
623ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
624ed5e484bSBen Gardon  * paging structure root.
625e28a436cSBen Gardon  *
626e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
627e28a436cSBen Gardon  * Return false if a yield was not needed.
628e28a436cSBen Gardon  */
629e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
630e139a34eSBen Gardon 					     struct tdp_iter *iter, bool flush)
631a6a0b05dSBen Gardon {
632ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
633ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
634ed5e484bSBen Gardon 		return false;
635ed5e484bSBen Gardon 
636531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6377cca2d0bSBen Gardon 		rcu_read_unlock();
6387cca2d0bSBen Gardon 
639e139a34eSBen Gardon 		if (flush)
640e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
641e139a34eSBen Gardon 
642531810caSBen Gardon 		cond_resched_rwlock_write(&kvm->mmu_lock);
6437cca2d0bSBen Gardon 		rcu_read_lock();
644ed5e484bSBen Gardon 
645ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
646ed5e484bSBen Gardon 
647b601c3bcSBen Gardon 		tdp_iter_restart(iter);
648ed5e484bSBen Gardon 
649e28a436cSBen Gardon 		return true;
650a6a0b05dSBen Gardon 	}
651e28a436cSBen Gardon 
652e28a436cSBen Gardon 	return false;
653a6a0b05dSBen Gardon }
654a6a0b05dSBen Gardon 
655faaf05b0SBen Gardon /*
656faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
657faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
658faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
659faaf05b0SBen Gardon  * MMU lock.
660063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
661063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
662063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
663063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
664a835429cSSean Christopherson  * operation can cause a soft lockup.  Note, in some use cases a flush may be
665a835429cSSean Christopherson  * required by prior actions.  Ensure the pending flush is performed prior to
666a835429cSSean Christopherson  * yielding.
667faaf05b0SBen Gardon  */
668faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
669a835429cSSean Christopherson 			  gfn_t start, gfn_t end, bool can_yield, bool flush)
670faaf05b0SBen Gardon {
671faaf05b0SBen Gardon 	struct tdp_iter iter;
672faaf05b0SBen Gardon 
6737cca2d0bSBen Gardon 	rcu_read_lock();
6747cca2d0bSBen Gardon 
675faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
6761af4a960SBen Gardon 		if (can_yield &&
677a835429cSSean Christopherson 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
678a835429cSSean Christopherson 			flush = false;
6791af4a960SBen Gardon 			continue;
6801af4a960SBen Gardon 		}
6811af4a960SBen Gardon 
682faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
683faaf05b0SBen Gardon 			continue;
684faaf05b0SBen Gardon 
685faaf05b0SBen Gardon 		/*
686faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
687faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
688faaf05b0SBen Gardon 		 * lower level.
689faaf05b0SBen Gardon 		 */
690faaf05b0SBen Gardon 		if ((iter.gfn < start ||
691faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
692faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
693faaf05b0SBen Gardon 			continue;
694faaf05b0SBen Gardon 
695faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
696a835429cSSean Christopherson 		flush = true;
697faaf05b0SBen Gardon 	}
6987cca2d0bSBen Gardon 
6997cca2d0bSBen Gardon 	rcu_read_unlock();
700a835429cSSean Christopherson 	return flush;
701faaf05b0SBen Gardon }
702faaf05b0SBen Gardon 
703faaf05b0SBen Gardon /*
704faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
705faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
706faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
707faaf05b0SBen Gardon  * MMU lock.
708faaf05b0SBen Gardon  */
7092b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
7102b9663d8SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
711faaf05b0SBen Gardon {
712faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
713faaf05b0SBen Gardon 
714a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
71533a31641SSean Christopherson 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
716faaf05b0SBen Gardon 
717faaf05b0SBen Gardon 	return flush;
718faaf05b0SBen Gardon }
719faaf05b0SBen Gardon 
720faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
721faaf05b0SBen Gardon {
722339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
7232b9663d8SSean Christopherson 	bool flush = false;
7242b9663d8SSean Christopherson 	int i;
725faaf05b0SBen Gardon 
7262b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
7272b9663d8SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
7282b9663d8SSean Christopherson 
729faaf05b0SBen Gardon 	if (flush)
730faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
731faaf05b0SBen Gardon }
732bb18842eSBen Gardon 
733bb18842eSBen Gardon /*
734bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
735bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
736bb18842eSBen Gardon  */
737bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
738bb18842eSBen Gardon 					  int map_writable,
739bb18842eSBen Gardon 					  struct tdp_iter *iter,
740bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
741bb18842eSBen Gardon {
742bb18842eSBen Gardon 	u64 new_spte;
743bb18842eSBen Gardon 	int ret = 0;
744bb18842eSBen Gardon 	int make_spte_ret = 0;
745bb18842eSBen Gardon 
7469a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
747bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
7489a77daacSBen Gardon 	else
749bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
750bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
751bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
752bb18842eSBen Gardon 					 &new_spte);
753bb18842eSBen Gardon 
754bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
755bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
7569a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
7579a77daacSBen Gardon 		return RET_PF_RETRY;
758bb18842eSBen Gardon 
759bb18842eSBen Gardon 	/*
760bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
761bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
762bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
763bb18842eSBen Gardon 	 */
764bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
765bb18842eSBen Gardon 		if (write)
766bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
767bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
768bb18842eSBen Gardon 	}
769bb18842eSBen Gardon 
770bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
7719a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
7729a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
7739a77daacSBen Gardon 				     new_spte);
774bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
7753849e092SSean Christopherson 	} else {
7769a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
7779a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
7783849e092SSean Christopherson 	}
779bb18842eSBen Gardon 
780bb18842eSBen Gardon 	if (!prefault)
781bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
782bb18842eSBen Gardon 
783bb18842eSBen Gardon 	return ret;
784bb18842eSBen Gardon }
785bb18842eSBen Gardon 
786bb18842eSBen Gardon /*
787bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
788bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
789bb18842eSBen Gardon  */
790bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
791bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
792bb18842eSBen Gardon 		    bool prefault)
793bb18842eSBen Gardon {
794bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
795bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
796bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
797bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
798bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
799bb18842eSBen Gardon 	struct tdp_iter iter;
80089c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
801bb18842eSBen Gardon 	u64 *child_pt;
802bb18842eSBen Gardon 	u64 new_spte;
803bb18842eSBen Gardon 	int ret;
804bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
805bb18842eSBen Gardon 	int level;
806bb18842eSBen Gardon 	int req_level;
807bb18842eSBen Gardon 
808bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
809bb18842eSBen Gardon 		return RET_PF_RETRY;
810bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
811bb18842eSBen Gardon 		return RET_PF_RETRY;
812bb18842eSBen Gardon 
813bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
814bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
815bb18842eSBen Gardon 
816bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
8177cca2d0bSBen Gardon 
8187cca2d0bSBen Gardon 	rcu_read_lock();
8197cca2d0bSBen Gardon 
820bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
821bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
822bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
823bb18842eSBen Gardon 						   iter.level, &pfn, &level);
824bb18842eSBen Gardon 
825bb18842eSBen Gardon 		if (iter.level == level)
826bb18842eSBen Gardon 			break;
827bb18842eSBen Gardon 
828bb18842eSBen Gardon 		/*
829bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
830bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
831bb18842eSBen Gardon 		 * with a non-leaf SPTE.
832bb18842eSBen Gardon 		 */
833bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
834bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
83508f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
8369a77daacSBen Gardon 				break;
837bb18842eSBen Gardon 
838bb18842eSBen Gardon 			/*
839bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
840bb18842eSBen Gardon 			 * because the new value informs the !present
841bb18842eSBen Gardon 			 * path below.
842bb18842eSBen Gardon 			 */
8437cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
844bb18842eSBen Gardon 		}
845bb18842eSBen Gardon 
846bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
84789c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
84889c0fd49SBen Gardon 			child_pt = sp->spt;
849a9442f59SBen Gardon 
850bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
851bb18842eSBen Gardon 						     !shadow_accessed_mask);
852bb18842eSBen Gardon 
8539a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
8549a77daacSBen Gardon 						    new_spte)) {
8559a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
8569a77daacSBen Gardon 						  huge_page_disallowed &&
8579a77daacSBen Gardon 						  req_level >= iter.level);
8589a77daacSBen Gardon 
859bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
8609a77daacSBen Gardon 			} else {
8619a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
8629a77daacSBen Gardon 				break;
8639a77daacSBen Gardon 			}
864bb18842eSBen Gardon 		}
865bb18842eSBen Gardon 	}
866bb18842eSBen Gardon 
8679a77daacSBen Gardon 	if (iter.level != level) {
8687cca2d0bSBen Gardon 		rcu_read_unlock();
869bb18842eSBen Gardon 		return RET_PF_RETRY;
8707cca2d0bSBen Gardon 	}
871bb18842eSBen Gardon 
872bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
873bb18842eSBen Gardon 					      pfn, prefault);
8747cca2d0bSBen Gardon 	rcu_read_unlock();
875bb18842eSBen Gardon 
876bb18842eSBen Gardon 	return ret;
877bb18842eSBen Gardon }
878063afacdSBen Gardon 
8793039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
8803039bcc7SSean Christopherson 				 bool flush)
8813039bcc7SSean Christopherson {
8823039bcc7SSean Christopherson 	struct kvm_mmu_page *root;
883c1b91493SSean Christopherson 
8843039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
8853039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
886e1eed584SSean Christopherson 				       range->may_block, flush);
8873039bcc7SSean Christopherson 
8883039bcc7SSean Christopherson 	return flush;
8893039bcc7SSean Christopherson }
8903039bcc7SSean Christopherson 
8913039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
8923039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
8933039bcc7SSean Christopherson 
8943039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
8953039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
896c1b91493SSean Christopherson 						   tdp_handler_t handler)
897063afacdSBen Gardon {
898063afacdSBen Gardon 	struct kvm_mmu_page *root;
8993039bcc7SSean Christopherson 	struct tdp_iter iter;
9003039bcc7SSean Christopherson 	bool ret = false;
901063afacdSBen Gardon 
9023039bcc7SSean Christopherson 	rcu_read_lock();
903063afacdSBen Gardon 
904e1eed584SSean Christopherson 	/*
905e1eed584SSean Christopherson 	 * Don't support rescheduling, none of the MMU notifiers that funnel
906e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
907e1eed584SSean Christopherson 	 */
9083039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
9093039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
9103039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
9113039bcc7SSean Christopherson 	}
912063afacdSBen Gardon 
9133039bcc7SSean Christopherson 	rcu_read_unlock();
914063afacdSBen Gardon 
915063afacdSBen Gardon 	return ret;
916063afacdSBen Gardon }
917063afacdSBen Gardon 
918f8e14497SBen Gardon /*
919f8e14497SBen Gardon  * Mark the SPTEs mapping the range of GFNs [start, end) unaccessed and return
920f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
921f8e14497SBen Gardon  */
9223039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
9233039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
924f8e14497SBen Gardon {
9253039bcc7SSean Christopherson 	u64 new_spte = 0;
926f8e14497SBen Gardon 
9273039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
9283039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
9293039bcc7SSean Christopherson 		return false;
9307cca2d0bSBen Gardon 
9313039bcc7SSean Christopherson 	new_spte = iter->old_spte;
932f8e14497SBen Gardon 
933f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
9348f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
935f8e14497SBen Gardon 	} else {
936f8e14497SBen Gardon 		/*
937f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
938f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
939f8e14497SBen Gardon 		 */
940f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
941f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
942f8e14497SBen Gardon 
943f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
944f8e14497SBen Gardon 	}
945f8e14497SBen Gardon 
9463039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
9473039bcc7SSean Christopherson 
9483039bcc7SSean Christopherson 	return true;
949f8e14497SBen Gardon }
950f8e14497SBen Gardon 
9513039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
952f8e14497SBen Gardon {
9533039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
954f8e14497SBen Gardon }
955f8e14497SBen Gardon 
9563039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
9573039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
958f8e14497SBen Gardon {
9593039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
960f8e14497SBen Gardon }
961f8e14497SBen Gardon 
9623039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
963f8e14497SBen Gardon {
9643039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
9653039bcc7SSean Christopherson }
9663039bcc7SSean Christopherson 
9673039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
9683039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
9693039bcc7SSean Christopherson {
9703039bcc7SSean Christopherson 	u64 new_spte;
9713039bcc7SSean Christopherson 
9723039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
9733039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
9743039bcc7SSean Christopherson 
9753039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
9763039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
9773039bcc7SSean Christopherson 		return false;
9783039bcc7SSean Christopherson 
9793039bcc7SSean Christopherson 	/*
9803039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
9813039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
9823039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
9833039bcc7SSean Christopherson 	 * See __handle_changed_spte().
9843039bcc7SSean Christopherson 	 */
9853039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
9863039bcc7SSean Christopherson 
9873039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
9883039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
9893039bcc7SSean Christopherson 								  pte_pfn(range->pte));
9903039bcc7SSean Christopherson 
9913039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
9923039bcc7SSean Christopherson 	}
9933039bcc7SSean Christopherson 
9943039bcc7SSean Christopherson 	return true;
995f8e14497SBen Gardon }
9961d8dd6b3SBen Gardon 
9971d8dd6b3SBen Gardon /*
9981d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
9991d8dd6b3SBen Gardon  * range->pte holds the new pte_t mapping the HVA specified by the MMU
10001d8dd6b3SBen Gardon  * notifier.
10011d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
10021d8dd6b3SBen Gardon  */
10033039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
10041d8dd6b3SBen Gardon {
10053039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
10061d8dd6b3SBen Gardon 
10073039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
10083039bcc7SSean Christopherson 	if (flush)
10093039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
10107cca2d0bSBen Gardon 
10113039bcc7SSean Christopherson 	return false;
10121d8dd6b3SBen Gardon }
10131d8dd6b3SBen Gardon 
1014a6a0b05dSBen Gardon /*
1015a6a0b05dSBen Gardon  * Remove write access from all the last-level SPTEs at or above min_level
1016a6a0b05dSBen Gardon  * that map GFNs [start, end).
1017a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1018a6a0b05dSBen Gardon  */
1019a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1020a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1021a6a0b05dSBen Gardon {
1022a6a0b05dSBen Gardon 	struct tdp_iter iter;
1023a6a0b05dSBen Gardon 	u64 new_spte;
1024a6a0b05dSBen Gardon 	bool spte_set = false;
1025a6a0b05dSBen Gardon 
10267cca2d0bSBen Gardon 	rcu_read_lock();
10277cca2d0bSBen Gardon 
1028a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1029a6a0b05dSBen Gardon 
1030a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1031a6a0b05dSBen Gardon 				   min_level, start, end) {
10321af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10331af4a960SBen Gardon 			continue;
10341af4a960SBen Gardon 
1035a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
10360f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
10370f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1038a6a0b05dSBen Gardon 			continue;
1039a6a0b05dSBen Gardon 
1040a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1041a6a0b05dSBen Gardon 
1042a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1043a6a0b05dSBen Gardon 		spte_set = true;
1044a6a0b05dSBen Gardon 	}
10457cca2d0bSBen Gardon 
10467cca2d0bSBen Gardon 	rcu_read_unlock();
1047a6a0b05dSBen Gardon 	return spte_set;
1048a6a0b05dSBen Gardon }
1049a6a0b05dSBen Gardon 
1050a6a0b05dSBen Gardon /*
1051a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1052a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1053a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1054a6a0b05dSBen Gardon  */
1055a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1056a6a0b05dSBen Gardon 			     int min_level)
1057a6a0b05dSBen Gardon {
1058a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1059a6a0b05dSBen Gardon 	bool spte_set = false;
1060a6a0b05dSBen Gardon 
1061a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1062a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1063a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1064a6a0b05dSBen Gardon 
1065a6a0b05dSBen Gardon 	return spte_set;
1066a6a0b05dSBen Gardon }
1067a6a0b05dSBen Gardon 
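/*
 * Illustrative sketch only, not part of the original file: one plausible way
 * a caller could use kvm_tdp_mmu_wrprot_slot() when write-protecting a
 * memslot for dirty logging. The wrapper name is hypothetical; the real call
 * sites live in the common x86 MMU code and may batch more work under the
 * lock.
 */
static void __maybe_unused example_wrprot_slot_for_dirty_log(struct kvm *kvm,
						struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	/* Protect down to 4K leaves so every guest write faults and is logged. */
	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
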
1068a6a0b05dSBen Gardon /*
1069a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1070a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1071a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1072a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1073a6a0b05dSBen Gardon  * be flushed.
1074a6a0b05dSBen Gardon  */
1075a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1076a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1077a6a0b05dSBen Gardon {
1078a6a0b05dSBen Gardon 	struct tdp_iter iter;
1079a6a0b05dSBen Gardon 	u64 new_spte;
1080a6a0b05dSBen Gardon 	bool spte_set = false;
1081a6a0b05dSBen Gardon 
10827cca2d0bSBen Gardon 	rcu_read_lock();
10837cca2d0bSBen Gardon 
1084a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
10851af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10861af4a960SBen Gardon 			continue;
10871af4a960SBen Gardon 
1088a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1089a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1090a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1091a6a0b05dSBen Gardon 			else
1092a6a0b05dSBen Gardon 				continue;
1093a6a0b05dSBen Gardon 		} else {
1094a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1095a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1096a6a0b05dSBen Gardon 			else
1097a6a0b05dSBen Gardon 				continue;
1098a6a0b05dSBen Gardon 		}
1099a6a0b05dSBen Gardon 
1100a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1101a6a0b05dSBen Gardon 		spte_set = true;
1102a6a0b05dSBen Gardon 	}
11037cca2d0bSBen Gardon 
11047cca2d0bSBen Gardon 	rcu_read_unlock();
1105a6a0b05dSBen Gardon 	return spte_set;
1106a6a0b05dSBen Gardon }
1107a6a0b05dSBen Gardon 
1108a6a0b05dSBen Gardon /*
1109a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1110a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1111a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1112a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1113a6a0b05dSBen Gardon  * be flushed.
1114a6a0b05dSBen Gardon  */
1115a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1116a6a0b05dSBen Gardon {
1117a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1118a6a0b05dSBen Gardon 	bool spte_set = false;
1119a6a0b05dSBen Gardon 
1120a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1121a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1122a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1123a6a0b05dSBen Gardon 
1124a6a0b05dSBen Gardon 	return spte_set;
1125a6a0b05dSBen Gardon }
1126a6a0b05dSBen Gardon 
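/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * wrapper showing the expected calling convention for
 * kvm_tdp_mmu_clear_dirty_slot() - call with mmu_lock held for write and
 * flush remote TLBs if any SPTE was changed.
 */
static void __maybe_unused example_clear_dirty_slot(struct kvm *kvm,
						    struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_clear_dirty_slot(kvm, slot);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
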
1127a6a0b05dSBen Gardon /*
1128a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1129a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1130a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1131a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1132a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1133a6a0b05dSBen Gardon  */
1134a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1135a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1136a6a0b05dSBen Gardon {
1137a6a0b05dSBen Gardon 	struct tdp_iter iter;
1138a6a0b05dSBen Gardon 	u64 new_spte;
1139a6a0b05dSBen Gardon 
11407cca2d0bSBen Gardon 	rcu_read_lock();
11417cca2d0bSBen Gardon 
1142a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1143a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1144a6a0b05dSBen Gardon 		if (!mask)
1145a6a0b05dSBen Gardon 			break;
1146a6a0b05dSBen Gardon 
1147a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1148a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1149a6a0b05dSBen Gardon 			continue;
1150a6a0b05dSBen Gardon 
1151f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1152f1b3b06aSBen Gardon 
1153a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1154a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1155a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1156a6a0b05dSBen Gardon 			else
1157a6a0b05dSBen Gardon 				continue;
1158a6a0b05dSBen Gardon 		} else {
1159a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1160a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1161a6a0b05dSBen Gardon 			else
1162a6a0b05dSBen Gardon 				continue;
1163a6a0b05dSBen Gardon 		}
1164a6a0b05dSBen Gardon 
1165a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1166a6a0b05dSBen Gardon 	}
11677cca2d0bSBen Gardon 
11687cca2d0bSBen Gardon 	rcu_read_unlock();
1169a6a0b05dSBen Gardon }
1170a6a0b05dSBen Gardon 
1171a6a0b05dSBen Gardon /*
1172a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1173a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1174a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1175a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1176a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1177a6a0b05dSBen Gardon  */
1178a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1179a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1180a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1181a6a0b05dSBen Gardon 				       bool wrprot)
1182a6a0b05dSBen Gardon {
1183a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1184a6a0b05dSBen Gardon 
1185531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1186a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1187a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1188a6a0b05dSBen Gardon }
1189a6a0b05dSBen Gardon 
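/*
 * Illustrative sketch only, not part of the original file: the mask-to-GFN
 * mapping used by clear_dirty_pt_masked(). Bit i of @mask selects gfn + i,
 * so for gfn == 0x1000 and mask == 0xa the SPTEs mapping GFNs 0x1001 and
 * 0x1003 would have their dirty state cleared. The helper is hypothetical
 * and exists purely to spell out that arithmetic.
 */
static void __maybe_unused example_walk_dirty_mask(gfn_t gfn, unsigned long mask)
{
	while (mask) {
		unsigned long bit = __ffs(mask);

		/* clear_dirty_pt_masked() would clean the SPTE for gfn + bit. */
		pr_debug("would clean dirty state for GFN 0x%llx\n", gfn + bit);
		mask &= ~(1UL << bit);
	}
}
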
1190a6a0b05dSBen Gardon /*
119187aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
119287aa9ec9SBen Gardon  * GFNs within the slot.
119314881998SBen Gardon  */
1194af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
119514881998SBen Gardon 				       struct kvm_mmu_page *root,
11968ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1197af95b53eSSean Christopherson 				       bool flush)
119814881998SBen Gardon {
11999eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
12009eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
120114881998SBen Gardon 	struct tdp_iter iter;
120214881998SBen Gardon 	kvm_pfn_t pfn;
120314881998SBen Gardon 
12047cca2d0bSBen Gardon 	rcu_read_lock();
12057cca2d0bSBen Gardon 
120614881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
1207af95b53eSSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
1208af95b53eSSean Christopherson 			flush = false;
12091af4a960SBen Gardon 			continue;
12101af4a960SBen Gardon 		}
12111af4a960SBen Gardon 
121214881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
121387aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
121414881998SBen Gardon 			continue;
121514881998SBen Gardon 
121614881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
121714881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
12189eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
12199eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
122014881998SBen Gardon 			continue;
122114881998SBen Gardon 
122214881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
122314881998SBen Gardon 
1224af95b53eSSean Christopherson 		flush = true;
122514881998SBen Gardon 	}
122614881998SBen Gardon 
12277cca2d0bSBen Gardon 	rcu_read_unlock();
1228af95b53eSSean Christopherson 
1229af95b53eSSean Christopherson 	return flush;
123014881998SBen Gardon }
123114881998SBen Gardon 
123214881998SBen Gardon /*
123314881998SBen Gardon  * Zap the leaf SPTEs which could be replaced by large mappings, for GFNs
123414881998SBen Gardon  * within the slot, so that subsequent faults can create large mappings.
123514881998SBen Gardon  */
1236142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
12378ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
12388ca6f063SBen Gardon 				       bool flush)
123914881998SBen Gardon {
124014881998SBen Gardon 	struct kvm_mmu_page *root;
124114881998SBen Gardon 
1242a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1243af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1244af95b53eSSean Christopherson 
1245142ccde1SSean Christopherson 	return flush;
124614881998SBen Gardon }
124746044f72SBen Gardon 
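/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * wrapper for kvm_tdp_mmu_zap_collapsible_sptes(), e.g. after dirty logging
 * is disabled for a slot, so that subsequent faults can rebuild large
 * mappings. The real caller may combine this with the legacy MMU path and
 * pass in an already-accumulated 'flush'.
 */
static void __maybe_unused example_recover_large_mappings(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
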
124846044f72SBen Gardon /*
124946044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12505fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
125146044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
125246044f72SBen Gardon  */
125346044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
125446044f72SBen Gardon 			      gfn_t gfn)
125546044f72SBen Gardon {
125646044f72SBen Gardon 	struct tdp_iter iter;
125746044f72SBen Gardon 	u64 new_spte;
125846044f72SBen Gardon 	bool spte_set = false;
125946044f72SBen Gardon 
12607cca2d0bSBen Gardon 	rcu_read_lock();
12617cca2d0bSBen Gardon 
126246044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
126346044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
126446044f72SBen Gardon 			break;
126546044f72SBen Gardon 
126646044f72SBen Gardon 		new_spte = iter.old_spte &
12675fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
126846044f72SBen Gardon 
126946044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
127046044f72SBen Gardon 		spte_set = true;
127146044f72SBen Gardon 	}
127246044f72SBen Gardon 
12737cca2d0bSBen Gardon 	rcu_read_unlock();
12747cca2d0bSBen Gardon 
127546044f72SBen Gardon 	return spte_set;
127646044f72SBen Gardon }
127746044f72SBen Gardon 
127846044f72SBen Gardon /*
127946044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12805fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
128146044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
128246044f72SBen Gardon  */
128346044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
128446044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
128546044f72SBen Gardon {
128646044f72SBen Gardon 	struct kvm_mmu_page *root;
128746044f72SBen Gardon 	bool spte_set = false;
128846044f72SBen Gardon 
1289531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1290a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
129146044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
1292a3f15bdaSSean Christopherson 
129346044f72SBen Gardon 	return spte_set;
129446044f72SBen Gardon }
129546044f72SBen Gardon 
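/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper that write-protects a single GFN, e.g. one backing a shadowed guest
 * page table, and flushes if any SPTE changed. As with the function above,
 * mmu_lock must be held for write.
 */
static void __maybe_unused example_write_protect_one_gfn(struct kvm *kvm,
					struct kvm_memory_slot *slot, gfn_t gfn)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn))
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
}
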
129695fb5b02SBen Gardon /*
129795fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
129895fb5b02SBen Gardon  * That SPTE may be non-present.
129995fb5b02SBen Gardon  */
130039b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
130139b4d43eSSean Christopherson 			 int *root_level)
130295fb5b02SBen Gardon {
130395fb5b02SBen Gardon 	struct tdp_iter iter;
130495fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
130595fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
13062aa07893SSean Christopherson 	int leaf = -1;
130795fb5b02SBen Gardon 
130839b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
130995fb5b02SBen Gardon 
13107cca2d0bSBen Gardon 	rcu_read_lock();
13117cca2d0bSBen Gardon 
131295fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
131395fb5b02SBen Gardon 		leaf = iter.level;
1314dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
131595fb5b02SBen Gardon 	}
131695fb5b02SBen Gardon 
13177cca2d0bSBen Gardon 	rcu_read_unlock();
13187cca2d0bSBen Gardon 
131995fb5b02SBen Gardon 	return leaf;
132095fb5b02SBen Gardon }
1321
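/*
 * Illustrative sketch only, not part of the original file: how a hypothetical
 * caller might consume the sptes[] array filled by kvm_tdp_mmu_get_walk().
 * Entries are indexed by paging level, from *root_level down to the returned
 * leaf level; a negative return means no SPTE was found for the address.
 */
static void __maybe_unused example_dump_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, level, leaf;

	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	if (leaf < 0)
		return;

	for (level = root_level; level >= leaf; level--)
		pr_debug("level %d: spte = 0x%llx\n", level, sptes[level]);
}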