xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 71f51d2c)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
1371ba3f31SPaolo Bonzini static bool __read_mostly tdp_mmu_enabled = true;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
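/*
 * With 0644 permissions the parameter is writable at runtime, presumably via
 * /sys/module/kvm/parameters/tdp_mmu (the exact sysfs path is an assumption
 * based on the module this file is built into). kvm_mmu_init_tdp_mmu()
 * samples the value with READ_ONCE() at VM creation, so later changes only
 * affect VMs created afterwards.
 */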
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17d501f747SBen Gardon bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20d501f747SBen Gardon 		return false;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28d501f747SBen Gardon 
29d501f747SBen Gardon 	return true;
30fe5db27dSBen Gardon }
31fe5db27dSBen Gardon 
326103bc07SBen Gardon static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
336103bc07SBen Gardon 							     bool shared)
346103bc07SBen Gardon {
356103bc07SBen Gardon 	if (shared)
366103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
376103bc07SBen Gardon 	else
386103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
396103bc07SBen Gardon }
406103bc07SBen Gardon 
41fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42fe5db27dSBen Gardon {
43fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
44fe5db27dSBen Gardon 		return;
4502c00b3aSBen Gardon 
46524a1e4eSSean Christopherson 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
4702c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
487cca2d0bSBen Gardon 
497cca2d0bSBen Gardon 	/*
507cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
517cca2d0bSBen Gardon 	 * can run before the VM is torn down.
527cca2d0bSBen Gardon 	 */
537cca2d0bSBen Gardon 	rcu_barrier();
5402c00b3aSBen Gardon }
5502c00b3aSBen Gardon 
562bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
576103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
586103bc07SBen Gardon 			  bool shared);
592bdb3d84SBen Gardon 
602bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
61a889ea54SBen Gardon {
622bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
632bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
64a889ea54SBen Gardon }
65a889ea54SBen Gardon 
66c0e64238SBen Gardon /*
67c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
68c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
69c0e64238SBen Gardon  * the memory.
70c0e64238SBen Gardon  * Because TDP MMU page table memory is only accessed in RCU read-side
71c0e64238SBen Gardon  * critical sections and is only freed after a grace period, lockless
72c0e64238SBen Gardon  * walkers will never use that memory after it has been freed.
73c0e64238SBen Gardon  */
74c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
75a889ea54SBen Gardon {
76c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
77c0e64238SBen Gardon 					       rcu_head);
78a889ea54SBen Gardon 
79c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
80a889ea54SBen Gardon }
81a889ea54SBen Gardon 
826103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
836103bc07SBen Gardon 			  bool shared)
842bdb3d84SBen Gardon {
856103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
862bdb3d84SBen Gardon 
8711cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
882bdb3d84SBen Gardon 		return;
892bdb3d84SBen Gardon 
902bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
912bdb3d84SBen Gardon 
92c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93c0e64238SBen Gardon 	list_del_rcu(&root->link);
94c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
952bdb3d84SBen Gardon 
96524a1e4eSSean Christopherson 	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
972bdb3d84SBen Gardon 
98c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
99a889ea54SBen Gardon }
100a889ea54SBen Gardon 
101cfc10997SBen Gardon /*
102cfc10997SBen Gardon  * Finds the next valid root after prev_root (or the first valid root if
103cfc10997SBen Gardon  * prev_root is NULL), takes a reference on it, and returns that next root.
104cfc10997SBen Gardon  * If prev_root is not NULL, this thread should have already taken a
105cfc10997SBen Gardon  * reference on it, and that reference will be dropped. If no valid root is
106cfc10997SBen Gardon  * found, this function will return NULL.
107cfc10997SBen Gardon  */
108cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
1096103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
1106103bc07SBen Gardon 					      bool shared)
111a889ea54SBen Gardon {
112a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
113a889ea54SBen Gardon 
114c0e64238SBen Gardon 	rcu_read_lock();
115c0e64238SBen Gardon 
116cfc10997SBen Gardon 	if (prev_root)
117c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
118c0e64238SBen Gardon 						  &prev_root->link,
119c0e64238SBen Gardon 						  typeof(*prev_root), link);
120cfc10997SBen Gardon 	else
121c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
122cfc10997SBen Gardon 						   typeof(*next_root), link);
123cfc10997SBen Gardon 
124c0e64238SBen Gardon 	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
125c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
126c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
127fb101293SBen Gardon 
128c0e64238SBen Gardon 	rcu_read_unlock();
129cfc10997SBen Gardon 
130cfc10997SBen Gardon 	if (prev_root)
1316103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
132cfc10997SBen Gardon 
133a889ea54SBen Gardon 	return next_root;
134a889ea54SBen Gardon }
135a889ea54SBen Gardon 
136a889ea54SBen Gardon /*
137a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
138a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
139a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
140a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
1416103bc07SBen Gardon  *
1426103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
1436103bc07SBen Gardon  * mode. In the unlikely event that this thread must free a root, the lock
1446103bc07SBen Gardon  * will be temporarily dropped and reacquired in write mode.
145a889ea54SBen Gardon  */
1466103bc07SBen Gardon #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
1476103bc07SBen Gardon 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
148cfc10997SBen Gardon 	     _root;							\
1496103bc07SBen Gardon 	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
150a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
151a3f15bdaSSean Christopherson 		} else
152a889ea54SBen Gardon 
153a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
154c0e64238SBen Gardon 	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
155c0e64238SBen Gardon 				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
156c0e64238SBen Gardon 				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
157a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
158a3f15bdaSSean Christopherson 		} else
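/*
 * A minimal, illustrative sketch of how the yield-safe iterator above is
 * typically used while holding the MMU lock for read (shared == true);
 * some_condition() is a hypothetical placeholder, not a real helper:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, true) {
 *		if (some_condition(root)) {
 *			// Exiting early: drop the iterator's reference.
 *			kvm_tdp_mmu_put_root(kvm, root, true);
 *			break;
 *		}
 *	}
 */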
15902c00b3aSBen Gardon 
16002c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
16102c00b3aSBen Gardon 						   int level)
16202c00b3aSBen Gardon {
16302c00b3aSBen Gardon 	union kvm_mmu_page_role role;
16402c00b3aSBen Gardon 
16502c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
16602c00b3aSBen Gardon 	role.level = level;
16702c00b3aSBen Gardon 	role.direct = true;
16802c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
16902c00b3aSBen Gardon 	role.access = ACC_ALL;
17002c00b3aSBen Gardon 
17102c00b3aSBen Gardon 	return role;
17202c00b3aSBen Gardon }
17302c00b3aSBen Gardon 
17402c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
17502c00b3aSBen Gardon 					       int level)
17602c00b3aSBen Gardon {
17702c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
17802c00b3aSBen Gardon 
17902c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
18002c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
18102c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
18202c00b3aSBen Gardon 
18302c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
18402c00b3aSBen Gardon 	sp->gfn = gfn;
18502c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
18602c00b3aSBen Gardon 
18733dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
18833dd3574SBen Gardon 
18902c00b3aSBen Gardon 	return sp;
19002c00b3aSBen Gardon }
19102c00b3aSBen Gardon 
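/*
 * Returns the physical address of a TDP MMU root for this vCPU: an existing
 * root with a matching role is reused if a reference can be taken, otherwise
 * a new root is allocated with an initial refcount of 1 and published on
 * tdp_mmu_roots under tdp_mmu_pages_lock.
 */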
1926e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
19302c00b3aSBen Gardon {
19402c00b3aSBen Gardon 	union kvm_mmu_page_role role;
19502c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
19602c00b3aSBen Gardon 	struct kvm_mmu_page *root;
19702c00b3aSBen Gardon 
1986e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
19902c00b3aSBen Gardon 
20002c00b3aSBen Gardon 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
20102c00b3aSBen Gardon 
20202c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
203a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
204fb101293SBen Gardon 		if (root->role.word == role.word &&
205fb101293SBen Gardon 		    kvm_tdp_mmu_get_root(kvm, root))
2066e6ec584SSean Christopherson 			goto out;
20702c00b3aSBen Gardon 	}
20802c00b3aSBen Gardon 
20902c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
21011cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
21102c00b3aSBen Gardon 
212c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
213c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
214c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
21502c00b3aSBen Gardon 
2166e6ec584SSean Christopherson out:
21702c00b3aSBen Gardon 	return __pa(root->spt);
218fe5db27dSBen Gardon }
2192f2fad08SBen Gardon 
2202f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2219a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2229a77daacSBen Gardon 				bool shared);
2232f2fad08SBen Gardon 
224f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
225f8e14497SBen Gardon {
226f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
227f8e14497SBen Gardon 		return;
228f8e14497SBen Gardon 
229f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
23064bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
23164bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
232f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
233f8e14497SBen Gardon }
234f8e14497SBen Gardon 
235a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
236a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
237a6a0b05dSBen Gardon {
238a6a0b05dSBen Gardon 	bool pfn_changed;
239a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
240a6a0b05dSBen Gardon 
241a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
242a6a0b05dSBen Gardon 		return;
243a6a0b05dSBen Gardon 
244a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
245a6a0b05dSBen Gardon 
246a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
247a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
248a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
249fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
250a6a0b05dSBen Gardon 	}
251a6a0b05dSBen Gardon }
252a6a0b05dSBen Gardon 
2532f2fad08SBen Gardon /**
254a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
255a9442f59SBen Gardon  *
256a9442f59SBen Gardon  * @kvm: kvm instance
257a9442f59SBen Gardon  * @sp: the new page
2589a77daacSBen Gardon  * @shared: This operation might not be running with exclusive use of
2599a77daacSBen Gardon  *	    the MMU lock, so it must synchronize with other
2609a77daacSBen Gardon  *	    threads that might be adding or removing pages.
261a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
262a9442f59SBen Gardon  *		eventual reclaim.
263a9442f59SBen Gardon  */
264a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2659a77daacSBen Gardon 			      bool shared, bool account_nx)
266a9442f59SBen Gardon {
2679a77daacSBen Gardon 	if (shared)
2689a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2699a77daacSBen Gardon 	else
270a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
271a9442f59SBen Gardon 
272a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
273a9442f59SBen Gardon 	if (account_nx)
274a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2759a77daacSBen Gardon 
2769a77daacSBen Gardon 	if (shared)
2779a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
278a9442f59SBen Gardon }
279a9442f59SBen Gardon 
280a9442f59SBen Gardon /**
281a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
282a9442f59SBen Gardon  *
283a9442f59SBen Gardon  * @kvm: kvm instance
284a9442f59SBen Gardon  * @sp: the page to be removed
2859a77daacSBen Gardon  * @shared: This operation might not be running with exclusive use of
2869a77daacSBen Gardon  *	    the MMU lock, so it must synchronize with other
2879a77daacSBen Gardon  *	    threads that might be adding or removing pages.
288a9442f59SBen Gardon  */
2899a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2909a77daacSBen Gardon 				bool shared)
291a9442f59SBen Gardon {
2929a77daacSBen Gardon 	if (shared)
2939a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2949a77daacSBen Gardon 	else
295a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
296a9442f59SBen Gardon 
297a9442f59SBen Gardon 	list_del(&sp->link);
298a9442f59SBen Gardon 	if (sp->lpage_disallowed)
299a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
3009a77daacSBen Gardon 
3019a77daacSBen Gardon 	if (shared)
3029a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
303a9442f59SBen Gardon }
304a9442f59SBen Gardon 
305a9442f59SBen Gardon /**
306a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
307a066e61fSBen Gardon  *
308a066e61fSBen Gardon  * @kvm: kvm instance
309a066e61fSBen Gardon  * @pt: the page removed from the paging structure
3109a77daacSBen Gardon  * @shared: This operation might not be running with exclusive use
3119a77daacSBen Gardon  *	    of the MMU lock, so it must synchronize with other
3129a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
313a066e61fSBen Gardon  *
314a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
315a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
31670fb3e41SBen Gardon  *
31770fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
31870fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
31970fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
32070fb3e41SBen Gardon  * early rcu_dereferences in the function.
321a066e61fSBen Gardon  */
32270fb3e41SBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
3239a77daacSBen Gardon 					bool shared)
324a066e61fSBen Gardon {
32570fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
326a066e61fSBen Gardon 	int level = sp->role.level;
327e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
328a066e61fSBen Gardon 	u64 old_child_spte;
3299a77daacSBen Gardon 	u64 *sptep;
330e25f0e0cSBen Gardon 	gfn_t gfn;
331a066e61fSBen Gardon 	int i;
332a066e61fSBen Gardon 
333a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
334a066e61fSBen Gardon 
3359a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
336a066e61fSBen Gardon 
337a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
33870fb3e41SBen Gardon 		sptep = rcu_dereference(pt) + i;
339f1b83255SKai Huang 		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
3409a77daacSBen Gardon 
3419a77daacSBen Gardon 		if (shared) {
342e25f0e0cSBen Gardon 			/*
343e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
344e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
345e25f0e0cSBen Gardon 			 * already marked as removed then another thread
346e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
347e25f0e0cSBen Gardon 			 * keep retrying the exchange until the SPTE changes
348e25f0e0cSBen Gardon 			 * from some other value to the removed SPTE value.
349e25f0e0cSBen Gardon 			 */
350e25f0e0cSBen Gardon 			for (;;) {
351e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
352e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
353e25f0e0cSBen Gardon 					break;
354e25f0e0cSBen Gardon 				cpu_relax();
355e25f0e0cSBen Gardon 			}
3569a77daacSBen Gardon 		} else {
3578df9f1afSSean Christopherson 			/*
3588df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3598df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3608df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3618df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3628df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3638df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3648df9f1afSSean Christopherson 			 * unreachable.
3658df9f1afSSean Christopherson 			 */
3669a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3678df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3688df9f1afSSean Christopherson 				continue;
369e25f0e0cSBen Gardon 
370e25f0e0cSBen Gardon 			/*
371e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
372e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
373e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
374e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
375e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
376e25f0e0cSBen Gardon 			 * the function.
377e25f0e0cSBen Gardon 			 */
378e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3799a77daacSBen Gardon 		}
380e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
381f1b83255SKai Huang 				    old_child_spte, REMOVED_SPTE, level,
382e25f0e0cSBen Gardon 				    shared);
383a066e61fSBen Gardon 	}
384a066e61fSBen Gardon 
385a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
386f1b83255SKai Huang 					   KVM_PAGES_PER_HPAGE(level + 1));
387a066e61fSBen Gardon 
3887cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
389a066e61fSBen Gardon }
390a066e61fSBen Gardon 
391a066e61fSBen Gardon /**
3927f6231a3SKai Huang  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
3932f2fad08SBen Gardon  * @kvm: kvm instance
3942f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3952f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3962f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3972f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3982f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3999a77daacSBen Gardon  * @shared: This operation might not be running with exclusive use of
4009a77daacSBen Gardon  *	    the MMU lock, so it must synchronize with other
4019a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
4022f2fad08SBen Gardon  *
4032f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
4042f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
4052f2fad08SBen Gardon  */
4062f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4079a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
4089a77daacSBen Gardon 				  bool shared)
4092f2fad08SBen Gardon {
4102f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
4112f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
4122f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
4132f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
4142f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4152f2fad08SBen Gardon 
4162f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4172f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
418764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4192f2fad08SBen Gardon 
4202f2fad08SBen Gardon 	/*
4212f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4222f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4232f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
424d9f6e12fSIngo Molnar 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4252f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4262f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4272f2fad08SBen Gardon 	 * thread before replacement.
4282f2fad08SBen Gardon 	 */
4292f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4302f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4312f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4322f2fad08SBen Gardon 		       "different PFN!\n"
4332f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4342f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4352f2fad08SBen Gardon 
4362f2fad08SBen Gardon 		/*
4372f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
438d9f6e12fSIngo Molnar 		 * corruption.
4392f2fad08SBen Gardon 		 */
4402f2fad08SBen Gardon 		BUG();
4412f2fad08SBen Gardon 	}
4422f2fad08SBen Gardon 
4432f2fad08SBen Gardon 	if (old_spte == new_spte)
4442f2fad08SBen Gardon 		return;
4452f2fad08SBen Gardon 
446b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
447b9a98c34SBen Gardon 
4482f2fad08SBen Gardon 	/*
4492f2fad08SBen Gardon 	 * The only times a SPTE should be changed from a non-present to
4502f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4512f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4522f2fad08SBen Gardon 	 */
4532f2fad08SBen Gardon 	if (!was_present && !is_present) {
4542f2fad08SBen Gardon 		/*
45508f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
45608f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
45708f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
45808f07c80SBen Gardon 		 * are nonpresent.
4592f2fad08SBen Gardon 		 */
46008f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
46108f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
46208f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4632f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4642f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4652f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
46608f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
46708f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4682f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4692f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4702f2fad08SBen Gardon 		return;
4712f2fad08SBen Gardon 	}
4722f2fad08SBen Gardon 
473*71f51d2cSMingwei Zhang 	if (is_leaf != was_leaf)
474*71f51d2cSMingwei Zhang 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
4752f2fad08SBen Gardon 
4762f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
47764bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4782f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4792f2fad08SBen Gardon 
4802f2fad08SBen Gardon 	/*
4812f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4822f2fad08SBen Gardon 	 * the paging structure.
4832f2fad08SBen Gardon 	 */
484a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
485a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4869a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4872f2fad08SBen Gardon }
4882f2fad08SBen Gardon 
4892f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4909a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4919a77daacSBen Gardon 				bool shared)
4922f2fad08SBen Gardon {
4939a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4949a77daacSBen Gardon 			      shared);
495f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
496a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
497a6a0b05dSBen Gardon 				      new_spte, level);
4982f2fad08SBen Gardon }
499faaf05b0SBen Gardon 
500fe43fa2fSBen Gardon /*
50124ae4cfaSBen Gardon  * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
50224ae4cfaSBen Gardon  * and handle the associated bookkeeping, but do not mark the page dirty
50324ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
5049a77daacSBen Gardon  *
5059a77daacSBen Gardon  * @kvm: kvm instance
5069a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
5079a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
5089a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
5099a77daacSBen Gardon  *	    this function will have no side-effects.
5109a77daacSBen Gardon  */
51124ae4cfaSBen Gardon static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
5129a77daacSBen Gardon 							struct tdp_iter *iter,
5139a77daacSBen Gardon 							u64 new_spte)
5149a77daacSBen Gardon {
5159a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
5169a77daacSBen Gardon 
51708f07c80SBen Gardon 	/*
51808f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
51908f07c80SBen Gardon 	 * may modify it.
52008f07c80SBen Gardon 	 */
5217a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
52208f07c80SBen Gardon 		return false;
52308f07c80SBen Gardon 
5246e8eb206SDavid Matlack 	/*
5256e8eb206SDavid Matlack 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
5266e8eb206SDavid Matlack 	 * does not hold the mmu_lock.
5276e8eb206SDavid Matlack 	 */
5289a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
5299a77daacSBen Gardon 		      new_spte) != iter->old_spte)
5309a77daacSBen Gardon 		return false;
5319a77daacSBen Gardon 
53224ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
53308889894SSean Christopherson 			      new_spte, iter->level, true);
53424ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
5359a77daacSBen Gardon 
5369a77daacSBen Gardon 	return true;
5379a77daacSBen Gardon }
5389a77daacSBen Gardon 
539081de470SDavid Matlack /*
540081de470SDavid Matlack  * tdp_mmu_map_set_spte_atomic - Set a leaf TDP MMU SPTE atomically to resolve a
541081de470SDavid Matlack  * TDP page fault.
542081de470SDavid Matlack  *
543081de470SDavid Matlack  * @vcpu: The vcpu instance that took the TDP page fault.
544081de470SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
545081de470SDavid Matlack  * @new_spte: The value the SPTE should be set to
546081de470SDavid Matlack  *
547081de470SDavid Matlack  * Returns: true if the SPTE was set, false if it was not. If false is returned,
548081de470SDavid Matlack  *	    this function will have no side-effects.
549081de470SDavid Matlack  */
550081de470SDavid Matlack static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
55124ae4cfaSBen Gardon 					       struct tdp_iter *iter,
55224ae4cfaSBen Gardon 					       u64 new_spte)
55324ae4cfaSBen Gardon {
554081de470SDavid Matlack 	struct kvm *kvm = vcpu->kvm;
555081de470SDavid Matlack 
55624ae4cfaSBen Gardon 	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
55724ae4cfaSBen Gardon 		return false;
55824ae4cfaSBen Gardon 
559081de470SDavid Matlack 	/*
560081de470SDavid Matlack 	 * Use kvm_vcpu_gfn_to_memslot() instead of going through
561081de470SDavid Matlack 	 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
562081de470SDavid Matlack 	 */
563081de470SDavid Matlack 	if (is_writable_pte(new_spte)) {
564081de470SDavid Matlack 		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);
565081de470SDavid Matlack 
566081de470SDavid Matlack 		if (slot && kvm_slot_dirty_track_enabled(slot)) {
567081de470SDavid Matlack 			/* Enforced by kvm_mmu_hugepage_adjust. */
568081de470SDavid Matlack 			WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
569081de470SDavid Matlack 			mark_page_dirty_in_slot(kvm, slot, iter->gfn);
570081de470SDavid Matlack 		}
571081de470SDavid Matlack 	}
572081de470SDavid Matlack 
57324ae4cfaSBen Gardon 	return true;
57424ae4cfaSBen Gardon }
57524ae4cfaSBen Gardon 
57608f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
57708f07c80SBen Gardon 					   struct tdp_iter *iter)
57808f07c80SBen Gardon {
57908f07c80SBen Gardon 	/*
58008f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
58108f07c80SBen Gardon 	 * non-present value. This will stop other threads from
58208f07c80SBen Gardon 	 * immediately installing a present entry in its place
58308f07c80SBen Gardon 	 * before the TLBs are flushed.
58408f07c80SBen Gardon 	 */
585081de470SDavid Matlack 	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, REMOVED_SPTE))
58608f07c80SBen Gardon 		return false;
58708f07c80SBen Gardon 
58808f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
58908f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
59008f07c80SBen Gardon 
59108f07c80SBen Gardon 	/*
59208f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
59308f07c80SBen Gardon 	 * must either wait on the MMU lock or use
594d9f6e12fSIngo Molnar 	 * tdp_mmu_set_spte_atomic which will not overwrite the
59508f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
59608f07c80SBen Gardon 	 * here since the SPTE is going from non-present
59708f07c80SBen Gardon 	 * to non-present.
59808f07c80SBen Gardon 	 */
59914f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
60008f07c80SBen Gardon 
60108f07c80SBen Gardon 	return true;
60208f07c80SBen Gardon }
60308f07c80SBen Gardon 
6049a77daacSBen Gardon 
6059a77daacSBen Gardon /*
606fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
607fe43fa2fSBen Gardon  * @kvm: kvm instance
608fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
609fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
610fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
611fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
612fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
613fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
614fe43fa2fSBen Gardon  *		      double counted.
615fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
616fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
617fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
618fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
619fe43fa2fSBen Gardon  *		      writes from being double counted.
620fe43fa2fSBen Gardon  */
621f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
622a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
623a6a0b05dSBen Gardon 				      bool record_dirty_log)
624faaf05b0SBen Gardon {
625531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
6263a9a4aa5SBen Gardon 
62708f07c80SBen Gardon 	/*
62808f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
62908f07c80SBen Gardon 	 * temporary removed SPTE value.
63008f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
63108f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
63208f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
63308f07c80SBen Gardon 	 */
6347a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
63508f07c80SBen Gardon 
6367cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
637faaf05b0SBen Gardon 
63808889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
63908889894SSean Christopherson 			      new_spte, iter->level, false);
640f8e14497SBen Gardon 	if (record_acc_track)
641f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
642f8e14497SBen Gardon 					      iter->level);
643a6a0b05dSBen Gardon 	if (record_dirty_log)
64408889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
645a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
646a6a0b05dSBen Gardon 					      iter->level);
647f8e14497SBen Gardon }
648f8e14497SBen Gardon 
649f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
650f8e14497SBen Gardon 				    u64 new_spte)
651f8e14497SBen Gardon {
652a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
653f8e14497SBen Gardon }
654f8e14497SBen Gardon 
655f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
656f8e14497SBen Gardon 						 struct tdp_iter *iter,
657f8e14497SBen Gardon 						 u64 new_spte)
658f8e14497SBen Gardon {
659a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
660a6a0b05dSBen Gardon }
661a6a0b05dSBen Gardon 
662a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
663a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
664a6a0b05dSBen Gardon 						 u64 new_spte)
665a6a0b05dSBen Gardon {
666a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
667faaf05b0SBen Gardon }
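/*
 * The three wrappers above choose which bookkeeping __tdp_mmu_set_spte()
 * performs: tdp_mmu_set_spte() records both accessed and dirty state,
 * tdp_mmu_set_spte_no_acc_track() skips accessed-state tracking (for access
 * tracking MMU notifiers), and tdp_mmu_set_spte_no_dirty_log() skips the
 * dirty bitmap update (for dirty logging operations), per the comment above
 * __tdp_mmu_set_spte().
 */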
668faaf05b0SBen Gardon 
669faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
670faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
671faaf05b0SBen Gardon 
672f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
673f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
674f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
675f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
676f8e14497SBen Gardon 			continue;					\
677f8e14497SBen Gardon 		else
678f8e14497SBen Gardon 
679bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
680bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
681bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
682bb18842eSBen Gardon 
683faaf05b0SBen Gardon /*
684e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
685e28a436cSBen Gardon  * to the scheduler.
686e28a436cSBen Gardon  *
687e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
688e139a34eSBen Gardon  * TLB flush before yielding.
689e139a34eSBen Gardon  *
690e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
691ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
692ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
693ed5e484bSBen Gardon  * paging structure root.
694e28a436cSBen Gardon  *
695e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
696e28a436cSBen Gardon  * Return false if a yield was not needed.
697e28a436cSBen Gardon  */
698e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
6996103bc07SBen Gardon 					     struct tdp_iter *iter, bool flush,
7006103bc07SBen Gardon 					     bool shared)
701a6a0b05dSBen Gardon {
702ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
703ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
704ed5e484bSBen Gardon 		return false;
705ed5e484bSBen Gardon 
706531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7077cca2d0bSBen Gardon 		rcu_read_unlock();
7087cca2d0bSBen Gardon 
709e139a34eSBen Gardon 		if (flush)
710e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
711e139a34eSBen Gardon 
7126103bc07SBen Gardon 		if (shared)
7136103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
7146103bc07SBen Gardon 		else
715531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
7166103bc07SBen Gardon 
7177cca2d0bSBen Gardon 		rcu_read_lock();
718ed5e484bSBen Gardon 
719ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
720ed5e484bSBen Gardon 
721b601c3bcSBen Gardon 		tdp_iter_restart(iter);
722ed5e484bSBen Gardon 
723e28a436cSBen Gardon 		return true;
724a6a0b05dSBen Gardon 	}
725e28a436cSBen Gardon 
726e28a436cSBen Gardon 	return false;
727a6a0b05dSBen Gardon }
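/*
 * Callers invoke tdp_mmu_iter_cond_resched() at the top of each loop
 * iteration and "continue" when it returns true, since the iterator has been
 * reset and must restart its traversal from the paging structure root; see
 * zap_gfn_range() below for an example of this pattern.
 */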
728a6a0b05dSBen Gardon 
729faaf05b0SBen Gardon /*
730faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
731faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
732faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
733faaf05b0SBen Gardon  * MMU lock.
7346103bc07SBen Gardon  *
735063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
736063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
737063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
738063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
7396103bc07SBen Gardon  * operation can cause a soft lockup.
7406103bc07SBen Gardon  *
7416103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7426103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7436103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7446103bc07SBen Gardon  * MMU lock in write mode.
745faaf05b0SBen Gardon  */
746faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
7476103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
7486103bc07SBen Gardon 			  bool shared)
749faaf05b0SBen Gardon {
750524a1e4eSSean Christopherson 	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
751524a1e4eSSean Christopherson 	bool zap_all = (start == 0 && end >= max_gfn_host);
752faaf05b0SBen Gardon 	struct tdp_iter iter;
753faaf05b0SBen Gardon 
754524a1e4eSSean Christopherson 	/*
7550103098fSSean Christopherson 	 * No need to try to step down in the iterator when zapping all SPTEs;
7560103098fSSean Christopherson 	 * zapping the top-level non-leaf SPTEs will recurse on their children.
7570103098fSSean Christopherson 	 */
7580103098fSSean Christopherson 	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
7590103098fSSean Christopherson 
7600103098fSSean Christopherson 	/*
761524a1e4eSSean Christopherson 	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
762524a1e4eSSean Christopherson 	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
763524a1e4eSSean Christopherson 	 * and so KVM will never install a SPTE for such addresses.
764524a1e4eSSean Christopherson 	 */
765524a1e4eSSean Christopherson 	end = min(end, max_gfn_host);
766524a1e4eSSean Christopherson 
7676103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
7686103bc07SBen Gardon 
7697cca2d0bSBen Gardon 	rcu_read_lock();
7707cca2d0bSBen Gardon 
7710103098fSSean Christopherson 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
7720103098fSSean Christopherson 				   min_level, start, end) {
7736103bc07SBen Gardon retry:
7741af4a960SBen Gardon 		if (can_yield &&
7756103bc07SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
776a835429cSSean Christopherson 			flush = false;
7771af4a960SBen Gardon 			continue;
7781af4a960SBen Gardon 		}
7791af4a960SBen Gardon 
780faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
781faaf05b0SBen Gardon 			continue;
782faaf05b0SBen Gardon 
783faaf05b0SBen Gardon 		/*
784faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
785faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
786524a1e4eSSean Christopherson 		 * lower level, except when zapping all SPTEs.
787faaf05b0SBen Gardon 		 */
788524a1e4eSSean Christopherson 		if (!zap_all &&
789524a1e4eSSean Christopherson 		    (iter.gfn < start ||
790faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
791faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
792faaf05b0SBen Gardon 			continue;
793faaf05b0SBen Gardon 
7946103bc07SBen Gardon 		if (!shared) {
795faaf05b0SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, 0);
796a835429cSSean Christopherson 			flush = true;
7976103bc07SBen Gardon 		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
7986103bc07SBen Gardon 			/*
7996103bc07SBen Gardon 			 * The iter must explicitly re-read the SPTE because
8006103bc07SBen Gardon 			 * the atomic cmpxchg failed.
8016103bc07SBen Gardon 			 */
8026103bc07SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
8036103bc07SBen Gardon 			goto retry;
8046103bc07SBen Gardon 		}
805faaf05b0SBen Gardon 	}
8067cca2d0bSBen Gardon 
8077cca2d0bSBen Gardon 	rcu_read_unlock();
808a835429cSSean Christopherson 	return flush;
809faaf05b0SBen Gardon }
810faaf05b0SBen Gardon 
811faaf05b0SBen Gardon /*
812faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
813faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
814faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
815faaf05b0SBen Gardon  * MMU lock.
816faaf05b0SBen Gardon  */
8172b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
8185a324c24SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
819faaf05b0SBen Gardon {
820faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
821faaf05b0SBen Gardon 
8225a324c24SSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
8236103bc07SBen Gardon 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
8245a324c24SSean Christopherson 				      false);
825faaf05b0SBen Gardon 
826faaf05b0SBen Gardon 	return flush;
827faaf05b0SBen Gardon }
828faaf05b0SBen Gardon 
829faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
830faaf05b0SBen Gardon {
8312b9663d8SSean Christopherson 	bool flush = false;
8322b9663d8SSean Christopherson 	int i;
833faaf05b0SBen Gardon 
8342b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
8355a324c24SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);
8362b9663d8SSean Christopherson 
837faaf05b0SBen Gardon 	if (flush)
838faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
839faaf05b0SBen Gardon }
840bb18842eSBen Gardon 
8414c6654bdSBen Gardon static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
8424c6654bdSBen Gardon 						  struct kvm_mmu_page *prev_root)
8434c6654bdSBen Gardon {
8444c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8454c6654bdSBen Gardon 
8464c6654bdSBen Gardon 	if (prev_root)
8474c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8484c6654bdSBen Gardon 						  &prev_root->link,
8494c6654bdSBen Gardon 						  typeof(*prev_root), link);
8504c6654bdSBen Gardon 	else
8514c6654bdSBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8524c6654bdSBen Gardon 						   typeof(*next_root), link);
8534c6654bdSBen Gardon 
8544c6654bdSBen Gardon 	while (next_root && !(next_root->role.invalid &&
8554c6654bdSBen Gardon 			      refcount_read(&next_root->tdp_mmu_root_count)))
8564c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8574c6654bdSBen Gardon 						  &next_root->link,
8584c6654bdSBen Gardon 						  typeof(*next_root), link);
8594c6654bdSBen Gardon 
8604c6654bdSBen Gardon 	return next_root;
8614c6654bdSBen Gardon }
8624c6654bdSBen Gardon 
8634c6654bdSBen Gardon /*
8644c6654bdSBen Gardon  * Since kvm_tdp_mmu_invalidate_all_roots() has acquired a reference to each
8654c6654bdSBen Gardon  * invalidated root, they will not be freed until this function drops the
8664c6654bdSBen Gardon  * reference. Before dropping that reference, tear down the paging
8674c6654bdSBen Gardon  * structure so that whichever thread does drop the last reference
8684c6654bdSBen Gardon  * only has to do a trivial amount of work. Since the roots are invalid,
8694c6654bdSBen Gardon  * no new SPTEs should be created under them.
8704c6654bdSBen Gardon  */
8714c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
8724c6654bdSBen Gardon {
8734c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8744c6654bdSBen Gardon 	struct kvm_mmu_page *root;
8754c6654bdSBen Gardon 	bool flush = false;
8764c6654bdSBen Gardon 
8774c6654bdSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
8784c6654bdSBen Gardon 
8794c6654bdSBen Gardon 	rcu_read_lock();
8804c6654bdSBen Gardon 
8814c6654bdSBen Gardon 	root = next_invalidated_root(kvm, NULL);
8824c6654bdSBen Gardon 
8834c6654bdSBen Gardon 	while (root) {
8844c6654bdSBen Gardon 		next_root = next_invalidated_root(kvm, root);
8854c6654bdSBen Gardon 
8864c6654bdSBen Gardon 		rcu_read_unlock();
8874c6654bdSBen Gardon 
888524a1e4eSSean Christopherson 		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
8894c6654bdSBen Gardon 
8904c6654bdSBen Gardon 		/*
8914c6654bdSBen Gardon 		 * Put the reference acquired in
8924c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_roots
8934c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_all_roots().
8944c6654bdSBen Gardon 		kvm_tdp_mmu_put_root(kvm, root, true);
8954c6654bdSBen Gardon 
8964c6654bdSBen Gardon 		root = next_root;
8974c6654bdSBen Gardon 
8984c6654bdSBen Gardon 		rcu_read_lock();
8994c6654bdSBen Gardon 	}
9004c6654bdSBen Gardon 
9014c6654bdSBen Gardon 	rcu_read_unlock();
9024c6654bdSBen Gardon 
9034c6654bdSBen Gardon 	if (flush)
9044c6654bdSBen Gardon 		kvm_flush_remote_tlbs(kvm);
9054c6654bdSBen Gardon }
9064c6654bdSBen Gardon 
907bb18842eSBen Gardon /*
908b7cccd39SBen Gardon  * Mark each TDP MMU root as invalid so that other threads
909b7cccd39SBen Gardon  * will drop their references and allow the root count to
910b7cccd39SBen Gardon  * go to 0.
911b7cccd39SBen Gardon  *
9124c6654bdSBen Gardon  * Also take a reference on all roots so that this thread
9134c6654bdSBen Gardon  * can do the bulk of the work required to free the roots
9144c6654bdSBen Gardon  * once they are invalidated. Without this reference, a
9154c6654bdSBen Gardon  * vCPU thread might drop the last reference to a root and
9164c6654bdSBen Gardon  * get stuck with tearing down the entire paging structure.
9174c6654bdSBen Gardon  *
9184c6654bdSBen Gardon  * Roots which have a zero refcount should be skipped as
9194c6654bdSBen Gardon  * they're already being torn down.
9204c6654bdSBen Gardon  * Already invalid roots should be referenced again so that
9214c6654bdSBen Gardon  * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots() is
9224c6654bdSBen Gardon  * done with them.
9234c6654bdSBen Gardon  *
924b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
925b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
926b7cccd39SBen Gardon  */
927b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
928b7cccd39SBen Gardon {
929b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
930b7cccd39SBen Gardon 
931b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
932b7cccd39SBen Gardon 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
9334c6654bdSBen Gardon 		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
934b7cccd39SBen Gardon 			root->role.invalid = true;
935b7cccd39SBen Gardon }
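/*
 * The references taken above are dropped by kvm_tdp_mmu_zap_invalidated_roots(),
 * which tears down each invalidated root's paging structure before putting
 * its reference.
 */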
936b7cccd39SBen Gardon 
937bb18842eSBen Gardon /*
938bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
939bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
940bb18842eSBen Gardon  */
941bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
942bb18842eSBen Gardon 					  int map_writable,
943bb18842eSBen Gardon 					  struct tdp_iter *iter,
944bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
945bb18842eSBen Gardon {
946bb18842eSBen Gardon 	u64 new_spte;
94757a3e96dSKai Huang 	int ret = RET_PF_FIXED;
948bb18842eSBen Gardon 	int make_spte_ret = 0;
949bb18842eSBen Gardon 
9509a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
951bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
9529a77daacSBen Gardon 	else
953bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
954bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
955bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
956bb18842eSBen Gardon 					 &new_spte);
957bb18842eSBen Gardon 
958bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
959bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
960081de470SDavid Matlack 	else if (!tdp_mmu_map_set_spte_atomic(vcpu, iter, new_spte))
9619a77daacSBen Gardon 		return RET_PF_RETRY;
962bb18842eSBen Gardon 
963bb18842eSBen Gardon 	/*
964bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
965bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
966bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
967bb18842eSBen Gardon 	 */
968bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
969bb18842eSBen Gardon 		if (write)
970bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
971bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
972bb18842eSBen Gardon 	}
973bb18842eSBen Gardon 
974bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
9759a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
9769a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
9779a77daacSBen Gardon 				     new_spte);
978bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
9793849e092SSean Christopherson 	} else {
9809a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
9819a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
9823849e092SSean Christopherson 	}
983bb18842eSBen Gardon 
984857f8474SKai Huang 	/*
985857f8474SKai Huang 	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
986857f8474SKai Huang 	 * consistent with legacy MMU behavior.
987857f8474SKai Huang 	 */
988857f8474SKai Huang 	if (ret != RET_PF_SPURIOUS)
989bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
990bb18842eSBen Gardon 
991bb18842eSBen Gardon 	return ret;
992bb18842eSBen Gardon }
993bb18842eSBen Gardon 
994bb18842eSBen Gardon /*
995bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
996bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
997bb18842eSBen Gardon  */
998bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
999bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
1000bb18842eSBen Gardon 		    bool prefault)
1001bb18842eSBen Gardon {
1002bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
1003bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
1004bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
1005bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
1006bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1007bb18842eSBen Gardon 	struct tdp_iter iter;
100889c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1009bb18842eSBen Gardon 	u64 *child_pt;
1010bb18842eSBen Gardon 	u64 new_spte;
1011bb18842eSBen Gardon 	int ret;
1012bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
1013bb18842eSBen Gardon 	int level;
1014bb18842eSBen Gardon 	int req_level;
1015bb18842eSBen Gardon 
1016bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
1017bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
1018bb18842eSBen Gardon 
1019bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
10207cca2d0bSBen Gardon 
10217cca2d0bSBen Gardon 	rcu_read_lock();
10227cca2d0bSBen Gardon 
1023bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1024bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
1025bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
1026bb18842eSBen Gardon 						   iter.level, &pfn, &level);
1027bb18842eSBen Gardon 
1028bb18842eSBen Gardon 		if (iter.level == level)
1029bb18842eSBen Gardon 			break;
1030bb18842eSBen Gardon 
1031bb18842eSBen Gardon 		/*
1032bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1033bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1034bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1035bb18842eSBen Gardon 		 */
1036bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1037bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
103808f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
10399a77daacSBen Gardon 				break;
1040bb18842eSBen Gardon 
1041bb18842eSBen Gardon 			/*
1042bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1043bb18842eSBen Gardon 			 * because the new value informs the !present
1044bb18842eSBen Gardon 			 * path below.
1045bb18842eSBen Gardon 			 */
10467cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1047bb18842eSBen Gardon 		}
1048bb18842eSBen Gardon 
1049bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
1050ff76d506SKai Huang 			/*
1051c4342633SIngo Molnar 			 * If SPTE has been frozen by another thread, just
1052ff76d506SKai Huang 			 * give up and retry, avoiding unnecessary page table
1053ff76d506SKai Huang 			 * allocation and freeing.
1054ff76d506SKai Huang 			 */
1055ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1056ff76d506SKai Huang 				break;
1057ff76d506SKai Huang 
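			/*
			 * The SPTE is not present: allocate a child page
			 * table one level down and try to install it with an
			 * atomic cmpxchg.  If another thread beats us to it,
			 * free the unused page and retry the fault.
			 */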
1058f1b83255SKai Huang 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
105989c0fd49SBen Gardon 			child_pt = sp->spt;
1060a9442f59SBen Gardon 
1061bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
1062bb18842eSBen Gardon 						     !shadow_accessed_mask);
1063bb18842eSBen Gardon 
1064081de470SDavid Matlack 			if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter, new_spte)) {
10659a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
10669a77daacSBen Gardon 						  huge_page_disallowed &&
10679a77daacSBen Gardon 						  req_level >= iter.level);
10689a77daacSBen Gardon 
1069bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
10709a77daacSBen Gardon 			} else {
10719a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
10729a77daacSBen Gardon 				break;
10739a77daacSBen Gardon 			}
1074bb18842eSBen Gardon 		}
1075bb18842eSBen Gardon 	}
1076bb18842eSBen Gardon 
10779a77daacSBen Gardon 	if (iter.level != level) {
10787cca2d0bSBen Gardon 		rcu_read_unlock();
1079bb18842eSBen Gardon 		return RET_PF_RETRY;
10807cca2d0bSBen Gardon 	}
1081bb18842eSBen Gardon 
1082bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
1083bb18842eSBen Gardon 					      pfn, prefault);
10847cca2d0bSBen Gardon 	rcu_read_unlock();
1085bb18842eSBen Gardon 
1086bb18842eSBen Gardon 	return ret;
1087bb18842eSBen Gardon }
1088063afacdSBen Gardon 
10893039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
10903039bcc7SSean Christopherson 				 bool flush)
1091063afacdSBen Gardon {
1092063afacdSBen Gardon 	struct kvm_mmu_page *root;
1093063afacdSBen Gardon 
10943039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
10953039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
10966103bc07SBen Gardon 				       range->may_block, flush, false);
1097063afacdSBen Gardon 
10983039bcc7SSean Christopherson 	return flush;
10993039bcc7SSean Christopherson }
11003039bcc7SSean Christopherson 
11013039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
11023039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
11033039bcc7SSean Christopherson 
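/*
 * Call the handler on every leaf SPTE in the notifier range, for each TDP MMU
 * root in the range's address space, and OR together the handlers' return
 * values.
 */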
11043039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
11053039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1106c1b91493SSean Christopherson 						   tdp_handler_t handler)
1107063afacdSBen Gardon {
1108063afacdSBen Gardon 	struct kvm_mmu_page *root;
11093039bcc7SSean Christopherson 	struct tdp_iter iter;
11103039bcc7SSean Christopherson 	bool ret = false;
1111063afacdSBen Gardon 
11123039bcc7SSean Christopherson 	rcu_read_lock();
1113063afacdSBen Gardon 
1114063afacdSBen Gardon 	/*
1115e1eed584SSean Christopherson 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1116e1eed584SSean Christopherson 	 * into this helper allow blocking, so it would be dead, wasteful code.
1117063afacdSBen Gardon 	 */
11183039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
11193039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
11203039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
11213039bcc7SSean Christopherson 	}
1122063afacdSBen Gardon 
11233039bcc7SSean Christopherson 	rcu_read_unlock();
1124063afacdSBen Gardon 
1125063afacdSBen Gardon 	return ret;
1126063afacdSBen Gardon }
1127063afacdSBen Gardon 
1128f8e14497SBen Gardon /*
1129f8e14497SBen Gardon  * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1130f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
1131f8e14497SBen Gardon  */
11323039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
11333039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1134f8e14497SBen Gardon {
1135f8e14497SBen Gardon 	u64 new_spte = 0;
1136f8e14497SBen Gardon 
11373039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
11383039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
11393039bcc7SSean Christopherson 		return false;
11407cca2d0bSBen Gardon 
11413039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1142f8e14497SBen Gardon 
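	/*
	 * With A/D bits enabled, aging only requires clearing the accessed
	 * bit.  Without A/D bits, the SPTE must be converted to an
	 * access-tracked SPTE, taking care not to lose its dirty state.
	 */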
1143f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
11448f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1145f8e14497SBen Gardon 	} else {
1146f8e14497SBen Gardon 		/*
1147f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1148f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1149f8e14497SBen Gardon 		 */
1150f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1151f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1152f8e14497SBen Gardon 
1153f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1154f8e14497SBen Gardon 	}
1155f8e14497SBen Gardon 
11563039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
115733dd3574SBen Gardon 
11583039bcc7SSean Christopherson 	return true;
1159f8e14497SBen Gardon }
1160f8e14497SBen Gardon 
11613039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1162f8e14497SBen Gardon {
11633039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1164f8e14497SBen Gardon }
1165f8e14497SBen Gardon 
11663039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
11673039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1168f8e14497SBen Gardon {
11693039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1170f8e14497SBen Gardon }
1171f8e14497SBen Gardon 
11723039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1173f8e14497SBen Gardon {
11743039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
11753039bcc7SSean Christopherson }
11763039bcc7SSean Christopherson 
11773039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
11783039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
11793039bcc7SSean Christopherson {
11803039bcc7SSean Christopherson 	u64 new_spte;
11813039bcc7SSean Christopherson 
11823039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
11833039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11843039bcc7SSean Christopherson 
11853039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
11863039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
11873039bcc7SSean Christopherson 		return false;
11883039bcc7SSean Christopherson 
11893039bcc7SSean Christopherson 	/*
11903039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
11913039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
11923039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
11933039bcc7SSean Christopherson 	 * See __handle_changed_spte().
11943039bcc7SSean Christopherson 	 */
11953039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
11963039bcc7SSean Christopherson 
11973039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
11983039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
11993039bcc7SSean Christopherson 								  pte_pfn(range->pte));
12003039bcc7SSean Christopherson 
12013039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
12023039bcc7SSean Christopherson 	}
12033039bcc7SSean Christopherson 
12043039bcc7SSean Christopherson 	return true;
1205f8e14497SBen Gardon }
12061d8dd6b3SBen Gardon 
12071d8dd6b3SBen Gardon /*
12081d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
12091d8dd6b3SBen Gardon  * The new host PTE mapping the HVA is passed via range->pte by the MMU
12101d8dd6b3SBen Gardon  * notifier.
12111d8dd6b3SBen Gardon  * Returns false; any necessary TLB flush is performed before returning.
12121d8dd6b3SBen Gardon  */
12133039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
12141d8dd6b3SBen Gardon {
12153039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
12161d8dd6b3SBen Gardon 
12173039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
12183039bcc7SSean Christopherson 	if (flush)
12193039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
12207cca2d0bSBen Gardon 
12213039bcc7SSean Christopherson 	return false;
12221d8dd6b3SBen Gardon }
12231d8dd6b3SBen Gardon 
1224a6a0b05dSBen Gardon /*
1225bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1226bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1227bedd9195SDavid Matlack  * be flushed.
1228a6a0b05dSBen Gardon  */
1229a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1230a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1231a6a0b05dSBen Gardon {
1232a6a0b05dSBen Gardon 	struct tdp_iter iter;
1233a6a0b05dSBen Gardon 	u64 new_spte;
1234a6a0b05dSBen Gardon 	bool spte_set = false;
1235a6a0b05dSBen Gardon 
12367cca2d0bSBen Gardon 	rcu_read_lock();
12377cca2d0bSBen Gardon 
1238a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1239a6a0b05dSBen Gardon 
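	/*
	 * The walk runs under the read-mode mmu_lock, so clear the writable
	 * bit with an atomic cmpxchg and retry if a racing update changes
	 * the SPTE first.
	 */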
1240a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1241a6a0b05dSBen Gardon 				   min_level, start, end) {
124224ae4cfaSBen Gardon retry:
124324ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12441af4a960SBen Gardon 			continue;
12451af4a960SBen Gardon 
1246a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
12470f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
12480f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1249a6a0b05dSBen Gardon 			continue;
1250a6a0b05dSBen Gardon 
1251a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1252a6a0b05dSBen Gardon 
125324ae4cfaSBen Gardon 		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
125424ae4cfaSBen Gardon 							  new_spte)) {
125524ae4cfaSBen Gardon 			/*
125624ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
125724ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
125824ae4cfaSBen Gardon 			 */
125924ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
126024ae4cfaSBen Gardon 			goto retry;
126124ae4cfaSBen Gardon 		}
1262a6a0b05dSBen Gardon 		spte_set = true;
1263a6a0b05dSBen Gardon 	}
12647cca2d0bSBen Gardon 
12657cca2d0bSBen Gardon 	rcu_read_unlock();
1266a6a0b05dSBen Gardon 	return spte_set;
1267a6a0b05dSBen Gardon }
1268a6a0b05dSBen Gardon 
1269a6a0b05dSBen Gardon /*
1270a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1271a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1272a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1273a6a0b05dSBen Gardon  */
1274269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1275269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1276a6a0b05dSBen Gardon {
1277a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1278a6a0b05dSBen Gardon 	bool spte_set = false;
1279a6a0b05dSBen Gardon 
128024ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1281a6a0b05dSBen Gardon 
128224ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1283a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1284a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1285a6a0b05dSBen Gardon 
1286a6a0b05dSBen Gardon 	return spte_set;
1287a6a0b05dSBen Gardon }
1288a6a0b05dSBen Gardon 
1289a6a0b05dSBen Gardon /*
1290a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1291a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1292a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1293a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1294a6a0b05dSBen Gardon  * be flushed.
1295a6a0b05dSBen Gardon  */
1296a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1297a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1298a6a0b05dSBen Gardon {
1299a6a0b05dSBen Gardon 	struct tdp_iter iter;
1300a6a0b05dSBen Gardon 	u64 new_spte;
1301a6a0b05dSBen Gardon 	bool spte_set = false;
1302a6a0b05dSBen Gardon 
13037cca2d0bSBen Gardon 	rcu_read_lock();
13047cca2d0bSBen Gardon 
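	/*
	 * If the SPTE needs write protection to track dirtying (i.e. A/D bits
	 * are not usable), clear the writable bit; otherwise just clear the
	 * dirty bit.
	 */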
1305a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
130624ae4cfaSBen Gardon retry:
130724ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
13081af4a960SBen Gardon 			continue;
13091af4a960SBen Gardon 
1310a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1311a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1312a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1313a6a0b05dSBen Gardon 			else
1314a6a0b05dSBen Gardon 				continue;
1315a6a0b05dSBen Gardon 		} else {
1316a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1317a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1318a6a0b05dSBen Gardon 			else
1319a6a0b05dSBen Gardon 				continue;
1320a6a0b05dSBen Gardon 		}
1321a6a0b05dSBen Gardon 
132224ae4cfaSBen Gardon 		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
132324ae4cfaSBen Gardon 							  new_spte)) {
132424ae4cfaSBen Gardon 			/*
132524ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
132624ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
132724ae4cfaSBen Gardon 			 */
132824ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
132924ae4cfaSBen Gardon 			goto retry;
133024ae4cfaSBen Gardon 		}
1331a6a0b05dSBen Gardon 		spte_set = true;
1332a6a0b05dSBen Gardon 	}
13337cca2d0bSBen Gardon 
13347cca2d0bSBen Gardon 	rcu_read_unlock();
1335a6a0b05dSBen Gardon 	return spte_set;
1336a6a0b05dSBen Gardon }
1337a6a0b05dSBen Gardon 
1338a6a0b05dSBen Gardon /*
1339a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1340a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1341a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1342a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1343a6a0b05dSBen Gardon  * be flushed.
1344a6a0b05dSBen Gardon  */
1345269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1346269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1347a6a0b05dSBen Gardon {
1348a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1349a6a0b05dSBen Gardon 	bool spte_set = false;
1350a6a0b05dSBen Gardon 
135124ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1352a6a0b05dSBen Gardon 
135324ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1354a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1355a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1356a6a0b05dSBen Gardon 
1357a6a0b05dSBen Gardon 	return spte_set;
1358a6a0b05dSBen Gardon }
1359a6a0b05dSBen Gardon 
1360a6a0b05dSBen Gardon /*
1361a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1362a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1363a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1364a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1365a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1366a6a0b05dSBen Gardon  */
1367a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1368a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1369a6a0b05dSBen Gardon {
1370a6a0b05dSBen Gardon 	struct tdp_iter iter;
1371a6a0b05dSBen Gardon 	u64 new_spte;
1372a6a0b05dSBen Gardon 
13737cca2d0bSBen Gardon 	rcu_read_lock();
13747cca2d0bSBen Gardon 
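	/*
	 * The mask covers at most BITS_PER_LONG GFNs starting at gfn.  Bits
	 * are cleared from the mask as they are handled so the walk can stop
	 * early once no bits remain.
	 */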
1375a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1376a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1377a6a0b05dSBen Gardon 		if (!mask)
1378a6a0b05dSBen Gardon 			break;
1379a6a0b05dSBen Gardon 
1380a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1381a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1382a6a0b05dSBen Gardon 			continue;
1383a6a0b05dSBen Gardon 
1384f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1385f1b3b06aSBen Gardon 
1386a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1387a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1388a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1389a6a0b05dSBen Gardon 			else
1390a6a0b05dSBen Gardon 				continue;
1391a6a0b05dSBen Gardon 		} else {
1392a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1393a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1394a6a0b05dSBen Gardon 			else
1395a6a0b05dSBen Gardon 				continue;
1396a6a0b05dSBen Gardon 		}
1397a6a0b05dSBen Gardon 
1398a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1399a6a0b05dSBen Gardon 	}
14007cca2d0bSBen Gardon 
14017cca2d0bSBen Gardon 	rcu_read_unlock();
1402a6a0b05dSBen Gardon }
1403a6a0b05dSBen Gardon 
1404a6a0b05dSBen Gardon /*
1405a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1406a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1407a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1408a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1409a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1410a6a0b05dSBen Gardon  */
1411a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1412a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1413a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1414a6a0b05dSBen Gardon 				       bool wrprot)
1415a6a0b05dSBen Gardon {
1416a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1417a6a0b05dSBen Gardon 
1418531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1419a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1420a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1421a6a0b05dSBen Gardon }
1422a6a0b05dSBen Gardon 
1423a6a0b05dSBen Gardon /*
142487aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
142587aa9ec9SBen Gardon  * GFNs within the slot.
142614881998SBen Gardon  */
1427af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
142814881998SBen Gardon 				       struct kvm_mmu_page *root,
14298ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1430af95b53eSSean Christopherson 				       bool flush)
143114881998SBen Gardon {
14329eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
14339eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
143414881998SBen Gardon 	struct tdp_iter iter;
143514881998SBen Gardon 	kvm_pfn_t pfn;
143614881998SBen Gardon 
14377cca2d0bSBen Gardon 	rcu_read_lock();
14387cca2d0bSBen Gardon 
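	/*
	 * Zap leaf SPTEs that map pages which could be covered by a larger
	 * mapping, so the next fault on the range can install a huge page.
	 */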
143914881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
14402db6f772SBen Gardon retry:
14412db6f772SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
1442af95b53eSSean Christopherson 			flush = false;
14431af4a960SBen Gardon 			continue;
14441af4a960SBen Gardon 		}
14451af4a960SBen Gardon 
144614881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
144787aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
144814881998SBen Gardon 			continue;
144914881998SBen Gardon 
145014881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
145114881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
14529eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
14539eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
145414881998SBen Gardon 			continue;
145514881998SBen Gardon 
14562db6f772SBen Gardon 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
14572db6f772SBen Gardon 			/*
14582db6f772SBen Gardon 			 * The iter must explicitly re-read the SPTE because
14592db6f772SBen Gardon 			 * the atomic cmpxchg failed.
14602db6f772SBen Gardon 			 */
14612db6f772SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
14622db6f772SBen Gardon 			goto retry;
14632db6f772SBen Gardon 		}
1464af95b53eSSean Christopherson 		flush = true;
146514881998SBen Gardon 	}
146614881998SBen Gardon 
14677cca2d0bSBen Gardon 	rcu_read_unlock();
1468af95b53eSSean Christopherson 
1469af95b53eSSean Christopherson 	return flush;
147014881998SBen Gardon }
147114881998SBen Gardon 
147214881998SBen Gardon /*
147314881998SBen Gardon  * Zap leaf SPTEs which could be replaced by large mappings, for GFNs
147414881998SBen Gardon  * within the slot.
147514881998SBen Gardon  */
1476142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
14778ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
14788ca6f063SBen Gardon 				       bool flush)
147914881998SBen Gardon {
148014881998SBen Gardon 	struct kvm_mmu_page *root;
148114881998SBen Gardon 
14822db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
148314881998SBen Gardon 
14842db6f772SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1485af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1486af95b53eSSean Christopherson 
1487142ccde1SSean Christopherson 	return flush;
148814881998SBen Gardon }
148946044f72SBen Gardon 
149046044f72SBen Gardon /*
149146044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14925fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
149346044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
149446044f72SBen Gardon  */
149546044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
14963ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
149746044f72SBen Gardon {
149846044f72SBen Gardon 	struct tdp_iter iter;
149946044f72SBen Gardon 	u64 new_spte;
150046044f72SBen Gardon 	bool spte_set = false;
150146044f72SBen Gardon 
15023ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
15033ad93562SKeqian Zhu 
15047cca2d0bSBen Gardon 	rcu_read_lock();
15057cca2d0bSBen Gardon 
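	/*
	 * The walk covers a single GFN, visiting at most one SPTE per level;
	 * only a present, writable leaf SPTE is modified.
	 */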
15063ad93562SKeqian Zhu 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
15073ad93562SKeqian Zhu 				   min_level, gfn, gfn + 1) {
15083ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
15093ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
15103ad93562SKeqian Zhu 			continue;
15113ad93562SKeqian Zhu 
151246044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
151346044f72SBen Gardon 			break;
151446044f72SBen Gardon 
151546044f72SBen Gardon 		new_spte = iter.old_spte &
15165fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
151746044f72SBen Gardon 
151846044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
151946044f72SBen Gardon 		spte_set = true;
152046044f72SBen Gardon 	}
152146044f72SBen Gardon 
15227cca2d0bSBen Gardon 	rcu_read_unlock();
15237cca2d0bSBen Gardon 
152446044f72SBen Gardon 	return spte_set;
152546044f72SBen Gardon }
152646044f72SBen Gardon 
152746044f72SBen Gardon /*
152846044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
15295fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
153046044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
153146044f72SBen Gardon  */
153246044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
15333ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
15343ad93562SKeqian Zhu 				   int min_level)
153546044f72SBen Gardon {
153646044f72SBen Gardon 	struct kvm_mmu_page *root;
153746044f72SBen Gardon 	bool spte_set = false;
153846044f72SBen Gardon 
1539531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1540a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
15413ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1542a3f15bdaSSean Christopherson 
154346044f72SBen Gardon 	return spte_set;
154446044f72SBen Gardon }
154546044f72SBen Gardon 
154695fb5b02SBen Gardon /*
154795fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
154895fb5b02SBen Gardon  * That SPTE may be non-present.
1549c5c8c7c5SDavid Matlack  *
1550c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
155195fb5b02SBen Gardon  */
155239b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
155339b4d43eSSean Christopherson 			 int *root_level)
155495fb5b02SBen Gardon {
155595fb5b02SBen Gardon 	struct tdp_iter iter;
155695fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
155795fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
15582aa07893SSean Christopherson 	int leaf = -1;
155995fb5b02SBen Gardon 
156039b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
156195fb5b02SBen Gardon 
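	/*
	 * Record every SPTE visited by the walk, indexed by level; the last
	 * level reached is returned as the leaf.
	 */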
156295fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
156395fb5b02SBen Gardon 		leaf = iter.level;
1564dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
156595fb5b02SBen Gardon 	}
156695fb5b02SBen Gardon 
156795fb5b02SBen Gardon 	return leaf;
156895fb5b02SBen Gardon }
15696e8eb206SDavid Matlack 
15706e8eb206SDavid Matlack /*
15716e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
15726e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
15736e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
15746e8eb206SDavid Matlack  *
15756e8eb206SDavid Matlack  * Contract:
15766e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
15776e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
15786e8eb206SDavid Matlack  *
15796e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
15806e8eb206SDavid Matlack  */
15816e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
15826e8eb206SDavid Matlack 					u64 *spte)
15836e8eb206SDavid Matlack {
15846e8eb206SDavid Matlack 	struct tdp_iter iter;
15856e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
15866e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
15876e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
15886e8eb206SDavid Matlack 
15896e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
15906e8eb206SDavid Matlack 		*spte = iter.old_spte;
15916e8eb206SDavid Matlack 		sptep = iter.sptep;
15926e8eb206SDavid Matlack 	}
15936e8eb206SDavid Matlack 
15946e8eb206SDavid Matlack 	/*
15956e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
15966e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
15976e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
15986e8eb206SDavid Matlack 	 * annotation.
15996e8eb206SDavid Matlack 	 *
16006e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
16016e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
16026e8eb206SDavid Matlack 	 * outside of mmu_lock.
16036e8eb206SDavid Matlack 	 */
16046e8eb206SDavid Matlack 	return rcu_dereference(sptep);
16056e8eb206SDavid Matlack }
1606