xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 6ccf4438)
// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}
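
/*
 * Illustrative note (hypothetical summary, not part of the kernel source):
 * the rcu_barrier() above pairs with the call_rcu() calls made elsewhere in
 * this file. A zap queues page table pages for freeing through call_rcu(),
 * and rcu_barrier() waits for all already-queued callbacks to finish, so a
 * deferred tdp_mmu_free_sp_rcu_callback() cannot run after VM teardown:
 *
 *	kvm_tdp_mmu_put_root()
 *	    call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 *	kvm_mmu_uninit_tdp_mmu()
 *	    rcu_barrier();	// all queued callbacks have run after this
 */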

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * Because TDP MMU page table memory is only accessed in RCU read-side
 * critical sections, and is only freed after a grace period, lockless
 * walkers will never use the memory after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}
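
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel source):
 * the read side of the scheme described above. A lockless walker only
 * dereferences TDP page table memory between rcu_read_lock() and
 * rcu_read_unlock(), so a page queued for freeing through the callback
 * above cannot be reclaimed while the walk is in flight.
 */
static void __maybe_unused example_lockless_read(tdp_ptep_t sptep)
{
	u64 spte;

	rcu_read_lock();
	/* Safe: memory reached from sptep cannot be freed in this section. */
	spte = READ_ONCE(*rcu_dereference(sptep));
	(void)spte;
	rcu_read_unlock();
}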

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after root (or the first valid root if root
 * is NULL), takes a reference on it, and returns that next root. If root
 * is not NULL, this thread should have already taken a reference on it, and
 * that reference will be dropped. If no valid root is found, this
 * function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else
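
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel source):
 * the reference contract of the iterator above. Running the loop to
 * completion drops every reference automatically; breaking out early
 * transfers ownership of one reference to the caller.
 */
static struct kvm_mmu_page * __maybe_unused
example_first_root(struct kvm *kvm, int as_id)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false) {
		/*
		 * Exiting the loop early keeps the reference taken by this
		 * iteration; the caller must eventually call
		 * kvm_tdp_mmu_put_root(kvm, root, false).
		 */
		return root;
	}

	return NULL;
}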

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces an NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * retry setting the SPTE until the old value is
			 * something other than the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
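
/*
 * Illustrative note (hypothetical example, not part of the kernel source):
 * a concrete interleaving the xchg() retry loop above guards against.
 * Suppose thread B, handling a page fault, has temporarily frozen a child
 * SPTE to REMOVED_SPTE and will overwrite it again shortly (e.g. with zero
 * and then a new value). If thread A's xchg() landed while the SPTE was
 * frozen, B's later store would clobber A's REMOVED_SPTE. A therefore
 * retries until its xchg() replaces a non-removed value, guaranteeing that
 * its REMOVED_SPTE sticks and that the value handed to
 * handle_changed_spte() is the SPTE's true final state.
 */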

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state are when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}
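
/*
 * Illustrative worked example (hypothetical, not part of the kernel
 * source): zapping a present, accessed, dirty 4K leaf (old_spte -> 0)
 * flows through the dispatcher above as follows:
 *
 *	__handle_changed_spte()         - page stats drop by one 4K page and
 *					  kvm_set_pfn_dirty() runs so the
 *					  dirty bit is not lost
 *	handle_changed_spte_acc_track() - kvm_set_pfn_accessed() propagates
 *					  the accessed bit to the primary MM
 *	handle_changed_spte_dirty_log() - no-op, the new SPTE is not writable
 */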

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}
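
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel source):
 * the retry pattern callers wrap around the cmpxchg-based setter above. On
 * failure the iterator's cached old_spte is stale and must be re-read
 * before the next attempt; zap_gfn_range() and kvm_tdp_mmu_map() below
 * implement the same pattern with goto/break.
 */
static void __maybe_unused example_set_spte_retry(struct kvm *kvm,
						  struct tdp_iter *iter,
						  u64 new_spte)
{
	while (!tdp_mmu_set_spte_atomic(kvm, iter, new_spte)) {
		/* Lost a race: refresh the stale snapshot and retry. */
		iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));

		/* Another thread froze the SPTE; only it may modify it. */
		if (is_removed_spte(iter->old_spte))
			break;
	}
}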

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}
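
/*
 * Illustrative note (hypothetical summary, not part of the kernel source):
 * picking a wrapper follows from the doc comment above. Most updates want
 * tdp_mmu_set_spte(). MMU-notifier aging uses
 * tdp_mmu_set_spte_no_acc_track() so that clearing the accessed bit is not
 * itself counted as an access (see age_gfn_range() below), and certain
 * dirty logging operations use tdp_mmu_set_spte_no_dirty_log() so that
 * guest writes are not double counted in the dirty bitmap.
 */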

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)
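
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel source):
 * a minimal walk over the present leaf SPTEs of one root using the macros
 * above. As with the other lockless walkers in this file, the iteration
 * runs under RCU; real callers also hold the MMU lock.
 */
static unsigned long __maybe_unused
example_count_leaves(struct kvm_mmu_page *root, gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	unsigned long nr = 0;

	rcu_read_lock();
	tdp_root_for_each_leaf_pte(iter, root, start, end)
		nr++;
	rcu_read_unlock();

	return nr;
}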

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}
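
/*
 * Illustrative note (not part of the kernel source): the canonical caller
 * pattern for the helper above, as used by zap_gfn_range() below. When the
 * helper yields it may have flushed on the caller's behalf, so the caller
 * clears its local flush state and continues, letting the restarted
 * iterator resume from the paging structure root:
 *
 *	if (can_yield &&
 *	    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
 *		flush = false;
 *		continue;
 *	}
 */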

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_roots
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}
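
/*
 * Illustrative worked example (hypothetical, not part of the kernel
 * source): the refcount lifecycle of a root during a fast zap. A root in
 * use by one vCPU holds tdp_mmu_root_count == 2 after
 * kvm_tdp_mmu_invalidate_all_roots() (the vCPU's reference plus the one
 * taken above). The vCPU drops its reference when it reloads its root,
 * leaving 1, and kvm_tdp_mmu_zap_invalidated_roots() drops the last
 * reference after the paging structure has already been torn down, so the
 * bulk of the teardown happens in the zapping thread rather than in a
 * vCPU.
 */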

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					  struct kvm_page_fault *fault,
					  struct tdp_iter *iter)
{
	u64 new_spte;
	int ret = RET_PF_FIXED;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(fault->pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					 fault->pfn, iter->old_spte, fault->prefault, true,
					 fault->map_writable, !shadow_accessed_mask,
					 &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (fault->write)
			ret = RET_PF_EMULATE;
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		if (iter.level == fault->goal_level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If the SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and freeing.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp,
						  fault->huge_page_disallowed &&
						  fault->req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != fault->goal_level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
	rcu_read_unlock();

	return ret;
}
1028063afacdSBen Gardon 
10293039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
10303039bcc7SSean Christopherson 				 bool flush)
1031063afacdSBen Gardon {
1032063afacdSBen Gardon 	struct kvm_mmu_page *root;
1033063afacdSBen Gardon 
10343039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
10353039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
10366103bc07SBen Gardon 				       range->may_block, flush, false);
1037063afacdSBen Gardon 
10383039bcc7SSean Christopherson 	return flush;
10393039bcc7SSean Christopherson }
10403039bcc7SSean Christopherson 
10413039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
10423039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
10433039bcc7SSean Christopherson 
10443039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
10453039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1046c1b91493SSean Christopherson 						   tdp_handler_t handler)
1047063afacdSBen Gardon {
1048063afacdSBen Gardon 	struct kvm_mmu_page *root;
10493039bcc7SSean Christopherson 	struct tdp_iter iter;
10503039bcc7SSean Christopherson 	bool ret = false;
1051063afacdSBen Gardon 
10523039bcc7SSean Christopherson 	rcu_read_lock();
1053063afacdSBen Gardon 
1054063afacdSBen Gardon 	/*
1055e1eed584SSean Christopherson 	 * Don't support rescheduling: none of the MMU notifiers that funnel
1056e1eed584SSean Christopherson 	 * into this helper allow blocking, so it'd be dead, wasteful code.
1057063afacdSBen Gardon 	 */
10583039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
10593039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
10603039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
10613039bcc7SSean Christopherson 	}
1062063afacdSBen Gardon 
10633039bcc7SSean Christopherson 	rcu_read_unlock();
1064063afacdSBen Gardon 
1065063afacdSBen Gardon 	return ret;
1066063afacdSBen Gardon }
1067063afacdSBen Gardon 
1068f8e14497SBen Gardon /*
1069f8e14497SBen Gardon  * Mark the range of SPTEs mapping GFNs [start, end) unaccessed and return
1070f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
1071f8e14497SBen Gardon  */
10723039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
10733039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1074f8e14497SBen Gardon {
1075f8e14497SBen Gardon 	u64 new_spte = 0;
1076f8e14497SBen Gardon 
10773039bcc7SSean Christopherson 	/* If we have a non-accessed entry, we don't need to change the PTE. */
10783039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
10793039bcc7SSean Christopherson 		return false;
10807cca2d0bSBen Gardon 
10813039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1082f8e14497SBen Gardon 
1083f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
10848f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1085f8e14497SBen Gardon 	} else {
1086f8e14497SBen Gardon 		/*
1087f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1088f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1089f8e14497SBen Gardon 		 */
1090f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1091f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1092f8e14497SBen Gardon 
1093f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1094f8e14497SBen Gardon 	}
1095f8e14497SBen Gardon 
10963039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
109733dd3574SBen Gardon 
10983039bcc7SSean Christopherson 	return true;
1099f8e14497SBen Gardon }
1100f8e14497SBen Gardon 
11013039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1102f8e14497SBen Gardon {
11033039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1104f8e14497SBen Gardon }
1105f8e14497SBen Gardon 
11063039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
11073039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1108f8e14497SBen Gardon {
11093039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1110f8e14497SBen Gardon }
1111f8e14497SBen Gardon 
11123039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1113f8e14497SBen Gardon {
11143039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
11153039bcc7SSean Christopherson }
11163039bcc7SSean Christopherson 
11173039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
11183039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
11193039bcc7SSean Christopherson {
11203039bcc7SSean Christopherson 	u64 new_spte;
11213039bcc7SSean Christopherson 
11223039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
11233039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11243039bcc7SSean Christopherson 
11253039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
11263039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
11273039bcc7SSean Christopherson 		return false;
11283039bcc7SSean Christopherson 
11293039bcc7SSean Christopherson 	/*
11303039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
11313039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
11323039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
11333039bcc7SSean Christopherson 	 * See __handle_changed_spte().
11343039bcc7SSean Christopherson 	 */
11353039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
11363039bcc7SSean Christopherson 
11373039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
11383039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
11393039bcc7SSean Christopherson 								  pte_pfn(range->pte));
11403039bcc7SSean Christopherson 
11413039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
11423039bcc7SSean Christopherson 	}
11433039bcc7SSean Christopherson 
11443039bcc7SSean Christopherson 	return true;
1145f8e14497SBen Gardon }
11461d8dd6b3SBen Gardon 
11471d8dd6b3SBen Gardon /*
11481d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
11491d8dd6b3SBen Gardon  * range->pte holds the new PTE mapping the HVA covered by the
11501d8dd6b3SBen Gardon  * notifier.  Any required TLB flush is currently performed here rather
11511d8dd6b3SBen Gardon  * than by the caller, so this always returns false (see the FIXME below).
11521d8dd6b3SBen Gardon  */
11533039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
11541d8dd6b3SBen Gardon {
11553039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
11561d8dd6b3SBen Gardon 
11573039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
11583039bcc7SSean Christopherson 	if (flush)
11593039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
11607cca2d0bSBen Gardon 
11613039bcc7SSean Christopherson 	return false;
11621d8dd6b3SBen Gardon }
11631d8dd6b3SBen Gardon 
1164a6a0b05dSBen Gardon /*
1165bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1166bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1167bedd9195SDavid Matlack  * be flushed.
1168a6a0b05dSBen Gardon  */
1169a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1170a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1171a6a0b05dSBen Gardon {
1172a6a0b05dSBen Gardon 	struct tdp_iter iter;
1173a6a0b05dSBen Gardon 	u64 new_spte;
1174a6a0b05dSBen Gardon 	bool spte_set = false;
1175a6a0b05dSBen Gardon 
11767cca2d0bSBen Gardon 	rcu_read_lock();
11777cca2d0bSBen Gardon 
1178a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1179a6a0b05dSBen Gardon 
1180a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1181a6a0b05dSBen Gardon 				   min_level, start, end) {
118224ae4cfaSBen Gardon retry:
118324ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
11841af4a960SBen Gardon 			continue;
11851af4a960SBen Gardon 
1186a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
11870f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
11880f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1189a6a0b05dSBen Gardon 			continue;
1190a6a0b05dSBen Gardon 
1191a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1192a6a0b05dSBen Gardon 
1193*6ccf4438SPaolo Bonzini 		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
119424ae4cfaSBen Gardon 			/*
119524ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
119624ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
119724ae4cfaSBen Gardon 			 */
119824ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
119924ae4cfaSBen Gardon 			goto retry;
120024ae4cfaSBen Gardon 		}
1201a6a0b05dSBen Gardon 		spte_set = true;
1202a6a0b05dSBen Gardon 	}
12037cca2d0bSBen Gardon 
12047cca2d0bSBen Gardon 	rcu_read_unlock();
1205a6a0b05dSBen Gardon 	return spte_set;
1206a6a0b05dSBen Gardon }
1207a6a0b05dSBen Gardon 
1208a6a0b05dSBen Gardon /*
1209a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1210a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1211a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1212a6a0b05dSBen Gardon  */
1213269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1214269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1215a6a0b05dSBen Gardon {
1216a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1217a6a0b05dSBen Gardon 	bool spte_set = false;
1218a6a0b05dSBen Gardon 
121924ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1220a6a0b05dSBen Gardon 
122124ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1222a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1223a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1224a6a0b05dSBen Gardon 
1225a6a0b05dSBen Gardon 	return spte_set;
1226a6a0b05dSBen Gardon }
1227a6a0b05dSBen Gardon 
1228a6a0b05dSBen Gardon /*
1229a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1230a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1231a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1232a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1233a6a0b05dSBen Gardon  * be flushed.
1234a6a0b05dSBen Gardon  */
1235a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1236a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1237a6a0b05dSBen Gardon {
1238a6a0b05dSBen Gardon 	struct tdp_iter iter;
1239a6a0b05dSBen Gardon 	u64 new_spte;
1240a6a0b05dSBen Gardon 	bool spte_set = false;
1241a6a0b05dSBen Gardon 
12427cca2d0bSBen Gardon 	rcu_read_lock();
12437cca2d0bSBen Gardon 
1244a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
124524ae4cfaSBen Gardon retry:
124624ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12471af4a960SBen Gardon 			continue;
12481af4a960SBen Gardon 
1249a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1250a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1251a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1252a6a0b05dSBen Gardon 			else
1253a6a0b05dSBen Gardon 				continue;
1254a6a0b05dSBen Gardon 		} else {
1255a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1256a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1257a6a0b05dSBen Gardon 			else
1258a6a0b05dSBen Gardon 				continue;
1259a6a0b05dSBen Gardon 		}
1260a6a0b05dSBen Gardon 
1261*6ccf4438SPaolo Bonzini 		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
126224ae4cfaSBen Gardon 			/*
126324ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
126424ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
126524ae4cfaSBen Gardon 			 */
126624ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
126724ae4cfaSBen Gardon 			goto retry;
126824ae4cfaSBen Gardon 		}
1269a6a0b05dSBen Gardon 		spte_set = true;
1270a6a0b05dSBen Gardon 	}
12717cca2d0bSBen Gardon 
12727cca2d0bSBen Gardon 	rcu_read_unlock();
1273a6a0b05dSBen Gardon 	return spte_set;
1274a6a0b05dSBen Gardon }
1275a6a0b05dSBen Gardon 
1276a6a0b05dSBen Gardon /*
1277a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1278a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1279a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1280a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1281a6a0b05dSBen Gardon  * be flushed.
1282a6a0b05dSBen Gardon  */
1283269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1284269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1285a6a0b05dSBen Gardon {
1286a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1287a6a0b05dSBen Gardon 	bool spte_set = false;
1288a6a0b05dSBen Gardon 
128924ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1290a6a0b05dSBen Gardon 
129124ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1292a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1293a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1294a6a0b05dSBen Gardon 
1295a6a0b05dSBen Gardon 	return spte_set;
1296a6a0b05dSBen Gardon }
1297a6a0b05dSBen Gardon 
1298a6a0b05dSBen Gardon /*
1299a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1300a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1301a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1302a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1303a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1304a6a0b05dSBen Gardon  */
1305a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1306a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1307a6a0b05dSBen Gardon {
1308a6a0b05dSBen Gardon 	struct tdp_iter iter;
1309a6a0b05dSBen Gardon 	u64 new_spte;
1310a6a0b05dSBen Gardon 
13117cca2d0bSBen Gardon 	rcu_read_lock();
13127cca2d0bSBen Gardon 
1313a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1314a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1315a6a0b05dSBen Gardon 		if (!mask)
1316a6a0b05dSBen Gardon 			break;
1317a6a0b05dSBen Gardon 
1318a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1319a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1320a6a0b05dSBen Gardon 			continue;
1321a6a0b05dSBen Gardon 
1322f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1323f1b3b06aSBen Gardon 
1324a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1325a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1326a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1327a6a0b05dSBen Gardon 			else
1328a6a0b05dSBen Gardon 				continue;
1329a6a0b05dSBen Gardon 		} else {
1330a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1331a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1332a6a0b05dSBen Gardon 			else
1333a6a0b05dSBen Gardon 				continue;
1334a6a0b05dSBen Gardon 		}
1335a6a0b05dSBen Gardon 
1336a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1337a6a0b05dSBen Gardon 	}
13387cca2d0bSBen Gardon 
13397cca2d0bSBen Gardon 	rcu_read_unlock();
1340a6a0b05dSBen Gardon }
1341a6a0b05dSBen Gardon 
1342a6a0b05dSBen Gardon /*
1343a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1344a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1345a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1346a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1347a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1348a6a0b05dSBen Gardon  */
1349a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1350a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1351a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1352a6a0b05dSBen Gardon 				       bool wrprot)
1353a6a0b05dSBen Gardon {
1354a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1355a6a0b05dSBen Gardon 
1356531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1357a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1358a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1359a6a0b05dSBen Gardon }
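
/*
 * For illustration, bit i of @mask covers @gfn + i. A hypothetical caller
 * clearing the dirty state of only gfn + 1 and gfn + 3 (with the MMU lock
 * held for write) would do:
 *
 *	unsigned long mask = BIT(1) | BIT(3);
 *
 *	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, gfn, mask, wrprot);
 */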
1360a6a0b05dSBen Gardon 
1361a6a0b05dSBen Gardon /*
136287aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
136287aa9ec9SBen Gardon  * Zap leaf entries that could be replaced by large mappings, for
136387aa9ec9SBen Gardon  * GFNs within the slot.
1365af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
136614881998SBen Gardon 				       struct kvm_mmu_page *root,
13678ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1368af95b53eSSean Christopherson 				       bool flush)
136914881998SBen Gardon {
13709eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
13719eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
137214881998SBen Gardon 	struct tdp_iter iter;
137314881998SBen Gardon 	kvm_pfn_t pfn;
137414881998SBen Gardon 
13757cca2d0bSBen Gardon 	rcu_read_lock();
13767cca2d0bSBen Gardon 
137714881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
13782db6f772SBen Gardon retry:
13792db6f772SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
1380af95b53eSSean Christopherson 			flush = false;
13811af4a960SBen Gardon 			continue;
13821af4a960SBen Gardon 		}
13831af4a960SBen Gardon 
138414881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
138587aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
138614881998SBen Gardon 			continue;
138714881998SBen Gardon 
138814881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
138914881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
13909eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
13919eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
139214881998SBen Gardon 			continue;
139314881998SBen Gardon 
13942db6f772SBen Gardon 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
13952db6f772SBen Gardon 			/*
13962db6f772SBen Gardon 			 * The iter must explicitly re-read the SPTE because
13972db6f772SBen Gardon 			 * the atomic cmpxchg failed.
13982db6f772SBen Gardon 			 */
13992db6f772SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
14002db6f772SBen Gardon 			goto retry;
14012db6f772SBen Gardon 		}
1402af95b53eSSean Christopherson 		flush = true;
140314881998SBen Gardon 	}
140414881998SBen Gardon 
14057cca2d0bSBen Gardon 	rcu_read_unlock();
1406af95b53eSSean Christopherson 
1407af95b53eSSean Christopherson 	return flush;
140814881998SBen Gardon }
140914881998SBen Gardon 
141014881998SBen Gardon /*
141114881998SBen Gardon  * Zap leaf entries that could be replaced by large mappings, for GFNs
141214881998SBen Gardon  * within the slot (see zap_collapsible_spte_range() above).
141314881998SBen Gardon  */
1414142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
14158ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
14168ca6f063SBen Gardon 				       bool flush)
141714881998SBen Gardon {
141814881998SBen Gardon 	struct kvm_mmu_page *root;
141914881998SBen Gardon 
14202db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
142114881998SBen Gardon 
14222db6f772SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1423af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1424af95b53eSSean Christopherson 
1425142ccde1SSean Christopherson 	return flush;
142614881998SBen Gardon }
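
/*
 * For illustration, the flush parameter lets a caller accumulate the need
 * for a TLB flush across several operations and issue a single flush at
 * the end. A hypothetical caller sketch:
 *
 *	bool flush = false;
 *
 *	flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 */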
142746044f72SBen Gardon 
142846044f72SBen Gardon /*
142946044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14305fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
143146044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
143246044f72SBen Gardon  */
143346044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
14343ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
143546044f72SBen Gardon {
143646044f72SBen Gardon 	struct tdp_iter iter;
143746044f72SBen Gardon 	u64 new_spte;
143846044f72SBen Gardon 	bool spte_set = false;
143946044f72SBen Gardon 
14403ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
14413ad93562SKeqian Zhu 
14427cca2d0bSBen Gardon 	rcu_read_lock();
14437cca2d0bSBen Gardon 
14443ad93562SKeqian Zhu 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
14453ad93562SKeqian Zhu 				   min_level, gfn, gfn + 1) {
14463ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
14473ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
14483ad93562SKeqian Zhu 			continue;
14493ad93562SKeqian Zhu 
145046044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
145146044f72SBen Gardon 			break;
145246044f72SBen Gardon 
145346044f72SBen Gardon 		new_spte = iter.old_spte &
14545fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
145546044f72SBen Gardon 
145646044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
145746044f72SBen Gardon 		spte_set = true;
145846044f72SBen Gardon 	}
145946044f72SBen Gardon 
14607cca2d0bSBen Gardon 	rcu_read_unlock();
14617cca2d0bSBen Gardon 
146246044f72SBen Gardon 	return spte_set;
146346044f72SBen Gardon }
146446044f72SBen Gardon 
146546044f72SBen Gardon /*
146646044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14675fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
146846044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
146946044f72SBen Gardon  */
147046044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
14713ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
14723ad93562SKeqian Zhu 				   int min_level)
147346044f72SBen Gardon {
147446044f72SBen Gardon 	struct kvm_mmu_page *root;
147546044f72SBen Gardon 	bool spte_set = false;
147646044f72SBen Gardon 
1477531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1478a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
14793ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1480a3f15bdaSSean Christopherson 
148146044f72SBen Gardon 	return spte_set;
148246044f72SBen Gardon }
148346044f72SBen Gardon 
148495fb5b02SBen Gardon /*
148595fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
148695fb5b02SBen Gardon  * That SPTE may be non-present.
1487c5c8c7c5SDavid Matlack  *
1488c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
148995fb5b02SBen Gardon  */
149039b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
149139b4d43eSSean Christopherson 			 int *root_level)
149295fb5b02SBen Gardon {
149395fb5b02SBen Gardon 	struct tdp_iter iter;
149495fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
149595fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
14962aa07893SSean Christopherson 	int leaf = -1;
149795fb5b02SBen Gardon 
149839b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
149995fb5b02SBen Gardon 
150095fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
150195fb5b02SBen Gardon 		leaf = iter.level;
1502dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
150395fb5b02SBen Gardon 	}
150495fb5b02SBen Gardon 
150595fb5b02SBen Gardon 	return leaf;
150695fb5b02SBen Gardon }
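
/*
 * For illustration, a hypothetical caller walks the recorded entries from
 * the root down to the lowest level reached (leaf == -1 means no SPTE was
 * walked):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, level;
 *	int leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *
 *	if (leaf >= 0)
 *		for (level = root_level; level >= leaf; level--)
 *			// examine sptes[level]
 */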
15076e8eb206SDavid Matlack 
15086e8eb206SDavid Matlack /*
15096e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
15106e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
15116e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
15126e8eb206SDavid Matlack  *
15136e8eb206SDavid Matlack  * Contract:
15146e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
15156e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
15166e8eb206SDavid Matlack  *
15176e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
15186e8eb206SDavid Matlack  */
15196e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
15206e8eb206SDavid Matlack 					u64 *spte)
15216e8eb206SDavid Matlack {
15226e8eb206SDavid Matlack 	struct tdp_iter iter;
15236e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
15246e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
15256e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
15266e8eb206SDavid Matlack 
15276e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
15286e8eb206SDavid Matlack 		*spte = iter.old_spte;
15296e8eb206SDavid Matlack 		sptep = iter.sptep;
15306e8eb206SDavid Matlack 	}
15316e8eb206SDavid Matlack 
15326e8eb206SDavid Matlack 	/*
15336e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
15346e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
15356e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
15366e8eb206SDavid Matlack 	 * annotation.
15376e8eb206SDavid Matlack 	 *
15386e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
15396e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
15406e8eb206SDavid Matlack 	 * outside of mmu_lock.
15416e8eb206SDavid Matlack 	 */
15426e8eb206SDavid Matlack 	return rcu_dereference(sptep);
15436e8eb206SDavid Matlack }
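
/*
 * For illustration, a hypothetical caller honoring the contract above
 * (per the WARNING, the real caller is fast_page_fault()):
 *
 *	u64 spte;
 *	u64 *sptep;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
 *	if (sptep)
 *		// use *sptep and spte, but only before ..._walk_lockless_end()
 *	kvm_tdp_mmu_walk_lockless_end();
 */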
1544