xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision b7cccd39)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
306103bc07SBen Gardon static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
316103bc07SBen Gardon 							     bool shared)
326103bc07SBen Gardon {
336103bc07SBen Gardon 	if (shared)
346103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
356103bc07SBen Gardon 	else
366103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
376103bc07SBen Gardon }
386103bc07SBen Gardon 
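/*
 * Finish TDP MMU teardown for the VM: all roots should already have been
 * put; wait for any outstanding RCU callbacks to free shadow pages.
 */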
39fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
40fe5db27dSBen Gardon {
41fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
42fe5db27dSBen Gardon 		return;
4302c00b3aSBen Gardon 
4402c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
457cca2d0bSBen Gardon 
467cca2d0bSBen Gardon 	/*
477cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
487cca2d0bSBen Gardon 	 * can run before the VM is torn down.
497cca2d0bSBen Gardon 	 */
507cca2d0bSBen Gardon 	rcu_barrier();
5102c00b3aSBen Gardon }
5202c00b3aSBen Gardon 
532bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
546103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
556103bc07SBen Gardon 			  bool shared);
562bdb3d84SBen Gardon 
572bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
58a889ea54SBen Gardon {
592bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
602bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
612bdb3d84SBen Gardon }
622bdb3d84SBen Gardon 
63c0e64238SBen Gardon /*
64c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
65c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
66c0e64238SBen Gardon  * the memory.
67c0e64238SBen Gardon  * Because TDP MMU page table memory is only accessed in an RCU read critical
68c0e64238SBen Gardon  * section, and is only freed after a grace period, lockless accessors are
69c0e64238SBen Gardon  * guaranteed not to use the memory after it is freed.
70c0e64238SBen Gardon  */
71c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
72c0e64238SBen Gardon {
73c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
74c0e64238SBen Gardon 					       rcu_head);
75c0e64238SBen Gardon 
76c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
77c0e64238SBen Gardon }
78c0e64238SBen Gardon 
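/*
 * Drop a reference on the given root. When the last reference is dropped,
 * the root is removed from the list of roots, its paging structure is
 * zapped, and the root page is freed after an RCU grace period.
 */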
796103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
806103bc07SBen Gardon 			  bool shared)
812bdb3d84SBen Gardon {
822bdb3d84SBen Gardon 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
832bdb3d84SBen Gardon 
846103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
852bdb3d84SBen Gardon 
8611cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
872bdb3d84SBen Gardon 		return;
882bdb3d84SBen Gardon 
892bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
902bdb3d84SBen Gardon 
91c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
92c0e64238SBen Gardon 	list_del_rcu(&root->link);
93c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
942bdb3d84SBen Gardon 
956103bc07SBen Gardon 	zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
962bdb3d84SBen Gardon 
97c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
98a889ea54SBen Gardon }
99a889ea54SBen Gardon 
100cfc10997SBen Gardon /*
101cfc10997SBen Gardon  * Finds the next valid root after prev_root (or the first valid root if
102cfc10997SBen Gardon  * prev_root is NULL), takes a reference on it, and returns that next root.
103cfc10997SBen Gardon  * If prev_root is not NULL, this thread should have already taken a
104cfc10997SBen Gardon  * reference on it, and that reference will be dropped. If no valid root is
105cfc10997SBen Gardon  * found, this function will return NULL.
106cfc10997SBen Gardon  */
107cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
1086103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
1096103bc07SBen Gardon 					      bool shared)
110a889ea54SBen Gardon {
111a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
112a889ea54SBen Gardon 
113c0e64238SBen Gardon 	rcu_read_lock();
114c0e64238SBen Gardon 
115cfc10997SBen Gardon 	if (prev_root)
116c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
117c0e64238SBen Gardon 						  &prev_root->link,
118c0e64238SBen Gardon 						  typeof(*prev_root), link);
119cfc10997SBen Gardon 	else
120c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121cfc10997SBen Gardon 						   typeof(*next_root), link);
122cfc10997SBen Gardon 
123c0e64238SBen Gardon 	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
124c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
125c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
126fb101293SBen Gardon 
127c0e64238SBen Gardon 	rcu_read_unlock();
128cfc10997SBen Gardon 
129cfc10997SBen Gardon 	if (prev_root)
1306103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
131cfc10997SBen Gardon 
132a889ea54SBen Gardon 	return next_root;
133a889ea54SBen Gardon }
134a889ea54SBen Gardon 
135a889ea54SBen Gardon /*
136a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
137a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
138a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
139a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
1406103bc07SBen Gardon  *
1416103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
1426103bc07SBen Gardon  * mode. In the unlikely event that this thread must free a root, the lock
1436103bc07SBen Gardon  * will be temporarily dropped and reacquired in write mode.
144a889ea54SBen Gardon  */
1456103bc07SBen Gardon #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
1466103bc07SBen Gardon 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
147cfc10997SBen Gardon 	     _root;							\
1486103bc07SBen Gardon 	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
149a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
150a3f15bdaSSean Christopherson 		} else
151a889ea54SBen Gardon 
152a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
153c0e64238SBen Gardon 	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
154c0e64238SBen Gardon 				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
155c0e64238SBen Gardon 				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
156a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
157a3f15bdaSSean Christopherson 		} else
15802c00b3aSBen Gardon 
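/*
 * Build the page role for a TDP MMU page at the given level, based on the
 * vCPU's base MMU role. TDP MMU pages are always direct mapped, use 8-byte
 * PTEs, and allow full access.
 */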
15902c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
16002c00b3aSBen Gardon 						   int level)
16102c00b3aSBen Gardon {
16202c00b3aSBen Gardon 	union kvm_mmu_page_role role;
16302c00b3aSBen Gardon 
16402c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
16502c00b3aSBen Gardon 	role.level = level;
16602c00b3aSBen Gardon 	role.direct = true;
16702c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
16802c00b3aSBen Gardon 	role.access = ACC_ALL;
16902c00b3aSBen Gardon 
17002c00b3aSBen Gardon 	return role;
17102c00b3aSBen Gardon }
17202c00b3aSBen Gardon 
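/*
 * Allocate and initialize a TDP MMU page for the given gfn and level from
 * the vCPU's memory caches. The caller is responsible for linking the new
 * page into the paging structure.
 */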
17302c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
17402c00b3aSBen Gardon 					       int level)
17502c00b3aSBen Gardon {
17602c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
17702c00b3aSBen Gardon 
17802c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
17902c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
18002c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
18102c00b3aSBen Gardon 
18202c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
18302c00b3aSBen Gardon 	sp->gfn = gfn;
18402c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
18502c00b3aSBen Gardon 
18633dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
18733dd3574SBen Gardon 
18802c00b3aSBen Gardon 	return sp;
18902c00b3aSBen Gardon }
19002c00b3aSBen Gardon 
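/*
 * Return the physical address of the vCPU's TDP MMU root page table, reusing
 * an existing root with a matching role (and taking a reference on it) if
 * possible, and allocating a new root otherwise.
 */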
1916e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
19202c00b3aSBen Gardon {
19302c00b3aSBen Gardon 	union kvm_mmu_page_role role;
19402c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
19502c00b3aSBen Gardon 	struct kvm_mmu_page *root;
19602c00b3aSBen Gardon 
1976e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
19802c00b3aSBen Gardon 
1996e6ec584SSean Christopherson 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
20002c00b3aSBen Gardon 
20102c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
202a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
203fb101293SBen Gardon 		if (root->role.word == role.word &&
204fb101293SBen Gardon 		    kvm_tdp_mmu_get_root(kvm, root))
2056e6ec584SSean Christopherson 			goto out;
20602c00b3aSBen Gardon 	}
20702c00b3aSBen Gardon 
20802c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
20911cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
21002c00b3aSBen Gardon 
211c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
212c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
213c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
21402c00b3aSBen Gardon 
2156e6ec584SSean Christopherson out:
21602c00b3aSBen Gardon 	return __pa(root->spt);
217fe5db27dSBen Gardon }
2182f2fad08SBen Gardon 
2192f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2209a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2219a77daacSBen Gardon 				bool shared);
2222f2fad08SBen Gardon 
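/*
 * Propagate the accessed state of a leaf SPTE back to the primary MM: if an
 * accessed leaf SPTE is zapped, loses its accessed bit, or changes PFN, mark
 * the old PFN as accessed.
 */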
223f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
224f8e14497SBen Gardon {
225f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
226f8e14497SBen Gardon 		return;
227f8e14497SBen Gardon 
228f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
22964bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
23064bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
231f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
232f8e14497SBen Gardon }
233f8e14497SBen Gardon 
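/*
 * Update the dirty bitmap for a 4k SPTE change: if the SPTE is becoming
 * writable, or a writable SPTE is changed to map a different PFN, mark the
 * gfn dirty in its memslot.
 */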
234a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
235a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
236a6a0b05dSBen Gardon {
237a6a0b05dSBen Gardon 	bool pfn_changed;
238a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
239a6a0b05dSBen Gardon 
240a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
241a6a0b05dSBen Gardon 		return;
242a6a0b05dSBen Gardon 
243a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
244a6a0b05dSBen Gardon 
245a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
246a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
247a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
248fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
249a6a0b05dSBen Gardon 	}
250a6a0b05dSBen Gardon }
251a6a0b05dSBen Gardon 
2522f2fad08SBen Gardon /**
253a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
254a9442f59SBen Gardon  *
255a9442f59SBen Gardon  * @kvm: kvm instance
256a9442f59SBen Gardon  * @sp: the new page
2579a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2589a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2599a77daacSBen Gardon  *	    threads that might be adding or removing pages.
260a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
261a9442f59SBen Gardon  *		eventual reclaim.
262a9442f59SBen Gardon  */
263a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2649a77daacSBen Gardon 			      bool shared, bool account_nx)
265a9442f59SBen Gardon {
2669a77daacSBen Gardon 	if (shared)
2679a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2689a77daacSBen Gardon 	else
269a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
270a9442f59SBen Gardon 
271a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
272a9442f59SBen Gardon 	if (account_nx)
273a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2749a77daacSBen Gardon 
2759a77daacSBen Gardon 	if (shared)
2769a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
277a9442f59SBen Gardon }
278a9442f59SBen Gardon 
279a9442f59SBen Gardon /**
280a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
281a9442f59SBen Gardon  *
282a9442f59SBen Gardon  * @kvm: kvm instance
283a9442f59SBen Gardon  * @sp: the page to be removed
2849a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2859a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2869a77daacSBen Gardon  *	    threads that might be adding or removing pages.
287a9442f59SBen Gardon  */
2889a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2899a77daacSBen Gardon 				bool shared)
290a9442f59SBen Gardon {
2919a77daacSBen Gardon 	if (shared)
2929a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2939a77daacSBen Gardon 	else
294a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
295a9442f59SBen Gardon 
296a9442f59SBen Gardon 	list_del(&sp->link);
297a9442f59SBen Gardon 	if (sp->lpage_disallowed)
298a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2999a77daacSBen Gardon 
3009a77daacSBen Gardon 	if (shared)
3019a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
302a9442f59SBen Gardon }
303a9442f59SBen Gardon 
304a9442f59SBen Gardon /**
305a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
306a066e61fSBen Gardon  *
307a066e61fSBen Gardon  * @kvm: kvm instance
308a066e61fSBen Gardon  * @pt: the page removed from the paging structure
3099a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
3109a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
3119a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
312a066e61fSBen Gardon  *
313a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
314a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
31570fb3e41SBen Gardon  *
31670fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
31770fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
31870fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
31970fb3e41SBen Gardon  * early rcu_dereferences in the function.
320a066e61fSBen Gardon  */
32170fb3e41SBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
3229a77daacSBen Gardon 					bool shared)
323a066e61fSBen Gardon {
32470fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
325a066e61fSBen Gardon 	int level = sp->role.level;
326e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
327a066e61fSBen Gardon 	u64 old_child_spte;
3289a77daacSBen Gardon 	u64 *sptep;
329e25f0e0cSBen Gardon 	gfn_t gfn;
330a066e61fSBen Gardon 	int i;
331a066e61fSBen Gardon 
332a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
333a066e61fSBen Gardon 
3349a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
335a066e61fSBen Gardon 
336a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
33770fb3e41SBen Gardon 		sptep = rcu_dereference(pt) + i;
338e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3399a77daacSBen Gardon 
3409a77daacSBen Gardon 		if (shared) {
341e25f0e0cSBen Gardon 			/*
342e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
343e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
344e25f0e0cSBen Gardon 			 * already marked as removed then another thread
345e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
346e25f0e0cSBen Gardon 			 * keep retrying the exchange until the SPTE changes
347e25f0e0cSBen Gardon 			 * from some other value to the removed SPTE value.
348e25f0e0cSBen Gardon 			 */
349e25f0e0cSBen Gardon 			for (;;) {
350e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
351e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
352e25f0e0cSBen Gardon 					break;
353e25f0e0cSBen Gardon 				cpu_relax();
354e25f0e0cSBen Gardon 			}
3559a77daacSBen Gardon 		} else {
3568df9f1afSSean Christopherson 			/*
3578df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3588df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3598df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3608df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3618df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3628df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3638df9f1afSSean Christopherson 			 * unreachable.
3648df9f1afSSean Christopherson 			 */
3659a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3668df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3678df9f1afSSean Christopherson 				continue;
368e25f0e0cSBen Gardon 
369e25f0e0cSBen Gardon 			/*
370e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
371e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
372e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
373e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
374e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
375e25f0e0cSBen Gardon 			 * the function.
376e25f0e0cSBen Gardon 			 */
377e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3789a77daacSBen Gardon 		}
379e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
380e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
381e25f0e0cSBen Gardon 				    shared);
382a066e61fSBen Gardon 	}
383a066e61fSBen Gardon 
384a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
385a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
386a066e61fSBen Gardon 
3877cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
388a066e61fSBen Gardon }
389a066e61fSBen Gardon 
390a066e61fSBen Gardon /**
3912f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3922f2fad08SBen Gardon  * @kvm: kvm instance
3932f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3942f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3952f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3962f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3972f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3989a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3999a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
4009a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
4012f2fad08SBen Gardon  *
4022f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
4032f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
4042f2fad08SBen Gardon  */
4052f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4069a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
4079a77daacSBen Gardon 				  bool shared)
4082f2fad08SBen Gardon {
4092f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
4102f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
4112f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
4122f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
4132f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4142f2fad08SBen Gardon 
4152f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4162f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
417764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4182f2fad08SBen Gardon 
4192f2fad08SBen Gardon 	/*
4202f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4212f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4222f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
4232f2fad08SBen Gardon 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4242f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4252f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4262f2fad08SBen Gardon 	 * thread before replacement.
4272f2fad08SBen Gardon 	 */
4282f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4292f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4302f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4312f2fad08SBen Gardon 		       "different PFN!\n"
4322f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4332f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4342f2fad08SBen Gardon 
4352f2fad08SBen Gardon 		/*
4362f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4372f2fad08SBen Gardon 		 * courruption.
4382f2fad08SBen Gardon 		 * corruption.
4392f2fad08SBen Gardon 		BUG();
4402f2fad08SBen Gardon 	}
4412f2fad08SBen Gardon 
4422f2fad08SBen Gardon 	if (old_spte == new_spte)
4432f2fad08SBen Gardon 		return;
4442f2fad08SBen Gardon 
445b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
446b9a98c34SBen Gardon 
4472f2fad08SBen Gardon 	/*
4482f2fad08SBen Gardon 	 * The only time a SPTE should be changed from one non-present state to
4492f2fad08SBen Gardon 	 * another is when an MMIO entry is installed/modified/
4502f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4512f2fad08SBen Gardon 	 */
4522f2fad08SBen Gardon 	if (!was_present && !is_present) {
4532f2fad08SBen Gardon 		/*
45508f07c80SBen Gardon 		 * If this change does not involve an MMIO SPTE or removed SPTE,
45508f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
45608f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
45708f07c80SBen Gardon 		 * are nonpresent.
4582f2fad08SBen Gardon 		 */
45908f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
46008f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
46108f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4622f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4632f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4642f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
46508f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
46608f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4672f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4682f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4692f2fad08SBen Gardon 		return;
4702f2fad08SBen Gardon 	}
4712f2fad08SBen Gardon 
4722f2fad08SBen Gardon 
4732f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
47464bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4752f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4762f2fad08SBen Gardon 
4772f2fad08SBen Gardon 	/*
4782f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4792f2fad08SBen Gardon 	 * the paging structure.
4802f2fad08SBen Gardon 	 */
481a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
482a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4839a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4842f2fad08SBen Gardon }
4852f2fad08SBen Gardon 
4862f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4879a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4889a77daacSBen Gardon 				bool shared)
4892f2fad08SBen Gardon {
4909a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4919a77daacSBen Gardon 			      shared);
492f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
493a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
494a6a0b05dSBen Gardon 				      new_spte, level);
4952f2fad08SBen Gardon }
496faaf05b0SBen Gardon 
497fe43fa2fSBen Gardon /*
49824ae4cfaSBen Gardon  * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
49924ae4cfaSBen Gardon  * and handle the associated bookkeeping, but do not mark the page dirty
50024ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
5019a77daacSBen Gardon  *
5029a77daacSBen Gardon  * @kvm: kvm instance
5039a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
5049a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
5059a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
5069a77daacSBen Gardon  *	    this function will have no side-effects.
5079a77daacSBen Gardon  */
50824ae4cfaSBen Gardon static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
5099a77daacSBen Gardon 							struct tdp_iter *iter,
5109a77daacSBen Gardon 							u64 new_spte)
5119a77daacSBen Gardon {
5129a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
5139a77daacSBen Gardon 
51408f07c80SBen Gardon 	/*
51508f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
51608f07c80SBen Gardon 	 * may modify it.
51708f07c80SBen Gardon 	 */
5187a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
51908f07c80SBen Gardon 		return false;
52008f07c80SBen Gardon 
5219a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
5229a77daacSBen Gardon 		      new_spte) != iter->old_spte)
5239a77daacSBen Gardon 		return false;
5249a77daacSBen Gardon 
52524ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
52608889894SSean Christopherson 			      new_spte, iter->level, true);
52724ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
5289a77daacSBen Gardon 
5299a77daacSBen Gardon 	return true;
5309a77daacSBen Gardon }
5319a77daacSBen Gardon 
53224ae4cfaSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
53324ae4cfaSBen Gardon 					   struct tdp_iter *iter,
53424ae4cfaSBen Gardon 					   u64 new_spte)
53524ae4cfaSBen Gardon {
53624ae4cfaSBen Gardon 	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
53724ae4cfaSBen Gardon 		return false;
53824ae4cfaSBen Gardon 
53924ae4cfaSBen Gardon 	handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
54024ae4cfaSBen Gardon 				      iter->old_spte, new_spte, iter->level);
54124ae4cfaSBen Gardon 	return true;
54224ae4cfaSBen Gardon }
54324ae4cfaSBen Gardon 
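/*
 * Zap the SPTE pointed to by the iterator while holding the MMU lock for
 * read: freeze it with the special removed value, flush remote TLBs, and
 * then clear it. Returns false if another thread modified the SPTE first.
 */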
54408f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
54508f07c80SBen Gardon 					   struct tdp_iter *iter)
54608f07c80SBen Gardon {
54708f07c80SBen Gardon 	/*
54808f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
54908f07c80SBen Gardon 	 * non-present value. This will stop other threads from
55008f07c80SBen Gardon 	 * immediately installing a present entry in its place
55108f07c80SBen Gardon 	 * before the TLBs are flushed.
55208f07c80SBen Gardon 	 */
55308f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
55408f07c80SBen Gardon 		return false;
55508f07c80SBen Gardon 
55608f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
55708f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
55808f07c80SBen Gardon 
55908f07c80SBen Gardon 	/*
56008f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
56108f07c80SBen Gardon 	 * must either wait on the MMU lock or use
56208f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overwrite the
56308f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
56408f07c80SBen Gardon 	 * here since the SPTE is going from non-present
56508f07c80SBen Gardon 	 * to non-present.
56608f07c80SBen Gardon 	 */
56714f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
56808f07c80SBen Gardon 
56908f07c80SBen Gardon 	return true;
57008f07c80SBen Gardon }
57108f07c80SBen Gardon 
5729a77daacSBen Gardon 
5739a77daacSBen Gardon /*
574fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
575fe43fa2fSBen Gardon  * @kvm: kvm instance
576fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
577fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
578fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
579fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
580fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
581fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
582fe43fa2fSBen Gardon  *		      double counted.
583fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
584fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
585fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
586fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
587fe43fa2fSBen Gardon  *		      writes from being double counted.
588fe43fa2fSBen Gardon  */
589f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
590a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
591a6a0b05dSBen Gardon 				      bool record_dirty_log)
592faaf05b0SBen Gardon {
593531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5943a9a4aa5SBen Gardon 
59508f07c80SBen Gardon 	/*
59608f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
59708f07c80SBen Gardon 	 * temporary removed SPTE value.
59808f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
59908f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
60008f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
60108f07c80SBen Gardon 	 */
6027a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
60308f07c80SBen Gardon 
6047cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
605faaf05b0SBen Gardon 
60608889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
60708889894SSean Christopherson 			      new_spte, iter->level, false);
608f8e14497SBen Gardon 	if (record_acc_track)
609f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
610f8e14497SBen Gardon 					      iter->level);
611a6a0b05dSBen Gardon 	if (record_dirty_log)
61208889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
613a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
614a6a0b05dSBen Gardon 					      iter->level);
615f8e14497SBen Gardon }
616f8e14497SBen Gardon 
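/*
 * Wrappers around __tdp_mmu_set_spte() that record accessed and/or dirty
 * state as appropriate for the caller; see the record_acc_track and
 * record_dirty_log parameters documented above.
 */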
617f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
618f8e14497SBen Gardon 				    u64 new_spte)
619f8e14497SBen Gardon {
620a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
621f8e14497SBen Gardon }
622f8e14497SBen Gardon 
623f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
624f8e14497SBen Gardon 						 struct tdp_iter *iter,
625f8e14497SBen Gardon 						 u64 new_spte)
626f8e14497SBen Gardon {
627a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
628a6a0b05dSBen Gardon }
629a6a0b05dSBen Gardon 
630a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
631a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
632a6a0b05dSBen Gardon 						 u64 new_spte)
633a6a0b05dSBen Gardon {
634a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
635faaf05b0SBen Gardon }
636faaf05b0SBen Gardon 
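/*
 * Iteration macros: tdp_root_for_each_pte() walks every SPTE of a root in
 * the given gfn range, tdp_root_for_each_leaf_pte() restricts the walk to
 * present leaf SPTEs, and tdp_mmu_for_each_pte() walks the paging structure
 * rooted at the given MMU's root_hpa.
 */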
637faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
638faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
639faaf05b0SBen Gardon 
640f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
641f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
642f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
643f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
644f8e14497SBen Gardon 			continue;					\
645f8e14497SBen Gardon 		else
646f8e14497SBen Gardon 
647bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
648bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
649bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
650bb18842eSBen Gardon 
651faaf05b0SBen Gardon /*
652e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
653e28a436cSBen Gardon  * to the scheduler.
654e28a436cSBen Gardon  *
655e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
656e139a34eSBen Gardon  * TLB flush before yielding.
657e139a34eSBen Gardon  *
658e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
659ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
660ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
661ed5e484bSBen Gardon  * paging structure root.
662e28a436cSBen Gardon  *
663e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
664e28a436cSBen Gardon  * Return false if a yield was not needed.
665e28a436cSBen Gardon  */
666e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
6676103bc07SBen Gardon 					     struct tdp_iter *iter, bool flush,
6686103bc07SBen Gardon 					     bool shared)
669a6a0b05dSBen Gardon {
670ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
671ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
672ed5e484bSBen Gardon 		return false;
673ed5e484bSBen Gardon 
674531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6757cca2d0bSBen Gardon 		rcu_read_unlock();
6767cca2d0bSBen Gardon 
677e139a34eSBen Gardon 		if (flush)
678e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
679e139a34eSBen Gardon 
6806103bc07SBen Gardon 		if (shared)
6816103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
6826103bc07SBen Gardon 		else
683531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6846103bc07SBen Gardon 
6857cca2d0bSBen Gardon 		rcu_read_lock();
686ed5e484bSBen Gardon 
687ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
688ed5e484bSBen Gardon 
689b601c3bcSBen Gardon 		tdp_iter_restart(iter);
690ed5e484bSBen Gardon 
691e28a436cSBen Gardon 		return true;
692a6a0b05dSBen Gardon 	}
693e28a436cSBen Gardon 
694e28a436cSBen Gardon 	return false;
695a6a0b05dSBen Gardon }
696a6a0b05dSBen Gardon 
697faaf05b0SBen Gardon /*
698faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
699faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
700faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
701faaf05b0SBen Gardon  * MMU lock.
7026103bc07SBen Gardon  *
703063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
704063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
705063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
706063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
7076103bc07SBen Gardon  * operation can cause a soft lockup.
7086103bc07SBen Gardon  *
7096103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7106103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7116103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7126103bc07SBen Gardon  * MMU lock in write mode.
713faaf05b0SBen Gardon  */
714faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
7156103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
7166103bc07SBen Gardon 			  bool shared)
717faaf05b0SBen Gardon {
718faaf05b0SBen Gardon 	struct tdp_iter iter;
719faaf05b0SBen Gardon 
7206103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
7216103bc07SBen Gardon 
7227cca2d0bSBen Gardon 	rcu_read_lock();
7237cca2d0bSBen Gardon 
724faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
7256103bc07SBen Gardon retry:
7261af4a960SBen Gardon 		if (can_yield &&
7276103bc07SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
728a835429cSSean Christopherson 			flush = false;
7291af4a960SBen Gardon 			continue;
7301af4a960SBen Gardon 		}
7311af4a960SBen Gardon 
732faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
733faaf05b0SBen Gardon 			continue;
734faaf05b0SBen Gardon 
735faaf05b0SBen Gardon 		/*
736faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
737faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
738faaf05b0SBen Gardon 		 * lower level.
739faaf05b0SBen Gardon 		 */
740faaf05b0SBen Gardon 		if ((iter.gfn < start ||
741faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
742faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
743faaf05b0SBen Gardon 			continue;
744faaf05b0SBen Gardon 
7456103bc07SBen Gardon 		if (!shared) {
746faaf05b0SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, 0);
747a835429cSSean Christopherson 			flush = true;
7486103bc07SBen Gardon 		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
7496103bc07SBen Gardon 			/*
7506103bc07SBen Gardon 			 * The iter must explicitly re-read the SPTE because
7516103bc07SBen Gardon 			 * the atomic cmpxchg failed.
7526103bc07SBen Gardon 			 */
7536103bc07SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
7546103bc07SBen Gardon 			goto retry;
7556103bc07SBen Gardon 		}
756faaf05b0SBen Gardon 	}
7577cca2d0bSBen Gardon 
7587cca2d0bSBen Gardon 	rcu_read_unlock();
759a835429cSSean Christopherson 	return flush;
760faaf05b0SBen Gardon }
761faaf05b0SBen Gardon 
762faaf05b0SBen Gardon /*
763faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
764faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
765faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
766faaf05b0SBen Gardon  * MMU lock.
7676103bc07SBen Gardon  *
7686103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7696103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7706103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7716103bc07SBen Gardon  * MMU lock in write mode.
772faaf05b0SBen Gardon  */
7732b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
7746103bc07SBen Gardon 				 gfn_t end, bool can_yield, bool flush,
7756103bc07SBen Gardon 				 bool shared)
776faaf05b0SBen Gardon {
777faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
778faaf05b0SBen Gardon 
7796103bc07SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
7806103bc07SBen Gardon 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
7816103bc07SBen Gardon 				      shared);
782faaf05b0SBen Gardon 
783faaf05b0SBen Gardon 	return flush;
784faaf05b0SBen Gardon }
785faaf05b0SBen Gardon 
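/*
 * Zap all SPTEs in all roots across every address space and flush the TLBs
 * if any SPTEs were cleared.
 */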
786faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
787faaf05b0SBen Gardon {
788339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
7892b9663d8SSean Christopherson 	bool flush = false;
7902b9663d8SSean Christopherson 	int i;
791faaf05b0SBen Gardon 
7922b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
7936103bc07SBen Gardon 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
7946103bc07SBen Gardon 						  flush, false);
7952b9663d8SSean Christopherson 
796faaf05b0SBen Gardon 	if (flush)
797faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
798faaf05b0SBen Gardon }
799bb18842eSBen Gardon 
800bb18842eSBen Gardon /*
801*b7cccd39SBen Gardon  * Mark each TDP MMU root as invalid so that other threads
802*b7cccd39SBen Gardon  * will drop their references and allow the root count to
803*b7cccd39SBen Gardon  * go to 0.
804*b7cccd39SBen Gardon  *
805*b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
806*b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
807*b7cccd39SBen Gardon  */
808*b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
809*b7cccd39SBen Gardon {
810*b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
811*b7cccd39SBen Gardon 
812*b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
813*b7cccd39SBen Gardon 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
814*b7cccd39SBen Gardon 		root->role.invalid = true;
815*b7cccd39SBen Gardon }
816*b7cccd39SBen Gardon 
817*b7cccd39SBen Gardon /*
818bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
819bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
820bb18842eSBen Gardon  */
821bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
822bb18842eSBen Gardon 					  int map_writable,
823bb18842eSBen Gardon 					  struct tdp_iter *iter,
824bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
825bb18842eSBen Gardon {
826bb18842eSBen Gardon 	u64 new_spte;
827bb18842eSBen Gardon 	int ret = 0;
828bb18842eSBen Gardon 	int make_spte_ret = 0;
829bb18842eSBen Gardon 
8309a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
831bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
8329a77daacSBen Gardon 	else
833bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
834bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
835bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
836bb18842eSBen Gardon 					 &new_spte);
837bb18842eSBen Gardon 
838bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
839bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
8409a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
8419a77daacSBen Gardon 		return RET_PF_RETRY;
842bb18842eSBen Gardon 
843bb18842eSBen Gardon 	/*
844bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
845bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
846bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
847bb18842eSBen Gardon 	 */
848bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
849bb18842eSBen Gardon 		if (write)
850bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
851bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
852bb18842eSBen Gardon 	}
853bb18842eSBen Gardon 
854bb18842eSBen Gardon 	/* If an MMIO SPTE is installed, the MMIO will need to be emulated. */
8559a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
8569a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
8579a77daacSBen Gardon 				     new_spte);
858bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
8593849e092SSean Christopherson 	} else {
8609a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
8619a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
8623849e092SSean Christopherson 	}
863bb18842eSBen Gardon 
864bb18842eSBen Gardon 	if (!prefault)
865bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
866bb18842eSBen Gardon 
867bb18842eSBen Gardon 	return ret;
868bb18842eSBen Gardon }
869bb18842eSBen Gardon 
870bb18842eSBen Gardon /*
871bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
872bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
873bb18842eSBen Gardon  */
874bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
875bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
876bb18842eSBen Gardon 		    bool prefault)
877bb18842eSBen Gardon {
878bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
879bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
880bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
881bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
882bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
883bb18842eSBen Gardon 	struct tdp_iter iter;
88489c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
885bb18842eSBen Gardon 	u64 *child_pt;
886bb18842eSBen Gardon 	u64 new_spte;
887bb18842eSBen Gardon 	int ret;
888bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
889bb18842eSBen Gardon 	int level;
890bb18842eSBen Gardon 	int req_level;
891bb18842eSBen Gardon 
892bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
893bb18842eSBen Gardon 		return RET_PF_RETRY;
894bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
895bb18842eSBen Gardon 		return RET_PF_RETRY;
896bb18842eSBen Gardon 
897bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
898bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
899bb18842eSBen Gardon 
900bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
9017cca2d0bSBen Gardon 
9027cca2d0bSBen Gardon 	rcu_read_lock();
9037cca2d0bSBen Gardon 
904bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
905bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
906bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
907bb18842eSBen Gardon 						   iter.level, &pfn, &level);
908bb18842eSBen Gardon 
909bb18842eSBen Gardon 		if (iter.level == level)
910bb18842eSBen Gardon 			break;
911bb18842eSBen Gardon 
912bb18842eSBen Gardon 		/*
913bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
914bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
915bb18842eSBen Gardon 		 * with a non-leaf SPTE.
916bb18842eSBen Gardon 		 */
917bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
918bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
91908f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
9209a77daacSBen Gardon 				break;
921bb18842eSBen Gardon 
922bb18842eSBen Gardon 			/*
923bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
924bb18842eSBen Gardon 			 * because the new value informs the !present
925bb18842eSBen Gardon 			 * path below.
926bb18842eSBen Gardon 			 */
9277cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
928bb18842eSBen Gardon 		}
929bb18842eSBen Gardon 
930bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
93189c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
93289c0fd49SBen Gardon 			child_pt = sp->spt;
933a9442f59SBen Gardon 
934bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
935bb18842eSBen Gardon 						     !shadow_accessed_mask);
936bb18842eSBen Gardon 
9379a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
9389a77daacSBen Gardon 						    new_spte)) {
9399a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
9409a77daacSBen Gardon 						  huge_page_disallowed &&
9419a77daacSBen Gardon 						  req_level >= iter.level);
9429a77daacSBen Gardon 
943bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
9449a77daacSBen Gardon 			} else {
9459a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
9469a77daacSBen Gardon 				break;
9479a77daacSBen Gardon 			}
948bb18842eSBen Gardon 		}
949bb18842eSBen Gardon 	}
950bb18842eSBen Gardon 
9519a77daacSBen Gardon 	if (iter.level != level) {
9527cca2d0bSBen Gardon 		rcu_read_unlock();
953bb18842eSBen Gardon 		return RET_PF_RETRY;
9547cca2d0bSBen Gardon 	}
955bb18842eSBen Gardon 
956bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
957bb18842eSBen Gardon 					      pfn, prefault);
9587cca2d0bSBen Gardon 	rcu_read_unlock();
959bb18842eSBen Gardon 
960bb18842eSBen Gardon 	return ret;
961bb18842eSBen Gardon }
962063afacdSBen Gardon 
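/*
 * MMU notifier unmap handler: zap the notifier's gfn range in every TDP MMU
 * root for the affected address space and return whether a TLB flush is
 * needed.
 */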
9633039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
9643039bcc7SSean Christopherson 				 bool flush)
9653039bcc7SSean Christopherson {
9663039bcc7SSean Christopherson 	struct kvm_mmu_page *root;
967c1b91493SSean Christopherson 
9683039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
9693039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
9706103bc07SBen Gardon 				       range->may_block, flush, false);
9713039bcc7SSean Christopherson 
9723039bcc7SSean Christopherson 	return flush;
9733039bcc7SSean Christopherson }
9743039bcc7SSean Christopherson 
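/*
 * Generic MMU notifier helper: walk the present leaf SPTEs covering the
 * notifier's gfn range in each root for the range's address space, apply the
 * given handler, and OR together the handlers' return values.
 */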
9753039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
9763039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
9773039bcc7SSean Christopherson 
9783039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
9793039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
980c1b91493SSean Christopherson 						   tdp_handler_t handler)
981063afacdSBen Gardon {
982063afacdSBen Gardon 	struct kvm_mmu_page *root;
9833039bcc7SSean Christopherson 	struct tdp_iter iter;
9843039bcc7SSean Christopherson 	bool ret = false;
985063afacdSBen Gardon 
9863039bcc7SSean Christopherson 	rcu_read_lock();
987063afacdSBen Gardon 
988e1eed584SSean Christopherson 	/*
989e1eed584SSean Christopherson 	 * Don't support rescheduling, none of the MMU notifiers that funnel
990e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
991e1eed584SSean Christopherson 	 */
9923039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
9933039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
9943039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
9953039bcc7SSean Christopherson 	}
996063afacdSBen Gardon 
9973039bcc7SSean Christopherson 	rcu_read_unlock();
998063afacdSBen Gardon 
999063afacdSBen Gardon 	return ret;
1000063afacdSBen Gardon }
1001063afacdSBen Gardon 
1002f8e14497SBen Gardon /*
1003f8e14497SBen Gardon  * Mark the SPTEs mapping the range of GFNs [start, end) unaccessed and return
1004f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
1005f8e14497SBen Gardon  */
10063039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
10073039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1008f8e14497SBen Gardon {
10093039bcc7SSean Christopherson 	u64 new_spte = 0;
1010f8e14497SBen Gardon 
10113039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
10123039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
10133039bcc7SSean Christopherson 		return false;
10147cca2d0bSBen Gardon 
10153039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1016f8e14497SBen Gardon 
1017f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
10188f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1019f8e14497SBen Gardon 	} else {
1020f8e14497SBen Gardon 		/*
1021f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1022f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1023f8e14497SBen Gardon 		 */
1024f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1025f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1026f8e14497SBen Gardon 
1027f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1028f8e14497SBen Gardon 	}
1029f8e14497SBen Gardon 
10303039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
10313039bcc7SSean Christopherson 
10323039bcc7SSean Christopherson 	return true;
1033f8e14497SBen Gardon }
1034f8e14497SBen Gardon 
10353039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1036f8e14497SBen Gardon {
10373039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1038f8e14497SBen Gardon }
1039f8e14497SBen Gardon 
10403039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
10413039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1042f8e14497SBen Gardon {
10433039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1044f8e14497SBen Gardon }
1045f8e14497SBen Gardon 
10463039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1047f8e14497SBen Gardon {
10483039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
10493039bcc7SSean Christopherson }
10503039bcc7SSean Christopherson 
10513039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
10523039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
10533039bcc7SSean Christopherson {
10543039bcc7SSean Christopherson 	u64 new_spte;
10553039bcc7SSean Christopherson 
10563039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
10573039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
10583039bcc7SSean Christopherson 
10593039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
10603039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
10613039bcc7SSean Christopherson 		return false;
10623039bcc7SSean Christopherson 
10633039bcc7SSean Christopherson 	/*
10643039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
10653039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
10663039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
10673039bcc7SSean Christopherson 	 * See __handle_changed_spte().
10683039bcc7SSean Christopherson 	 */
10693039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
10703039bcc7SSean Christopherson 
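	/*
	 * If the new host PTE is writable, leave the SPTE zapped; a writable
	 * mapping will be re-created with the new PFN on the next fault.
	 * Otherwise, install a read-only SPTE for the new PFN now.
	 */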
10713039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
10723039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
10733039bcc7SSean Christopherson 								  pte_pfn(range->pte));
10743039bcc7SSean Christopherson 
10753039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
10763039bcc7SSean Christopherson 	}
10773039bcc7SSean Christopherson 
10783039bcc7SSean Christopherson 	return true;
1079f8e14497SBen Gardon }
10801d8dd6b3SBen Gardon 
10811d8dd6b3SBen Gardon /*
10821d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
10831d8dd6b3SBen Gardon  * range->pte contains the new pte_t mapping the HVA specified by the MMU
10841d8dd6b3SBen Gardon  * notifier.
10851d8dd6b3SBen Gardon  * Returns true if a TLB flush is needed before releasing the MMU lock.
10861d8dd6b3SBen Gardon  */
10873039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
10881d8dd6b3SBen Gardon {
10893039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
10901d8dd6b3SBen Gardon 
10913039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
10923039bcc7SSean Christopherson 	if (flush)
10933039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
10947cca2d0bSBen Gardon 
10953039bcc7SSean Christopherson 	return false;
10961d8dd6b3SBen Gardon }
10971d8dd6b3SBen Gardon 
1098a6a0b05dSBen Gardon /*
1099a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs [start, end). Only
1100a6a0b05dSBen Gardon  * leaf SPTEs at or above min_level are write-protected.
1101a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1102a6a0b05dSBen Gardon  */
1103a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1104a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1105a6a0b05dSBen Gardon {
1106a6a0b05dSBen Gardon 	struct tdp_iter iter;
1107a6a0b05dSBen Gardon 	u64 new_spte;
1108a6a0b05dSBen Gardon 	bool spte_set = false;
1109a6a0b05dSBen Gardon 
11107cca2d0bSBen Gardon 	rcu_read_lock();
11117cca2d0bSBen Gardon 
1112a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1113a6a0b05dSBen Gardon 
1114a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1115a6a0b05dSBen Gardon 				   min_level, start, end) {
111624ae4cfaSBen Gardon retry:
111724ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
11181af4a960SBen Gardon 			continue;
11191af4a960SBen Gardon 
1120a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
11210f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
11220f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1123a6a0b05dSBen Gardon 			continue;
1124a6a0b05dSBen Gardon 
1125a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1126a6a0b05dSBen Gardon 
112724ae4cfaSBen Gardon 		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
112824ae4cfaSBen Gardon 							  new_spte)) {
112924ae4cfaSBen Gardon 			/*
113024ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
113124ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
113224ae4cfaSBen Gardon 			 */
113324ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
113424ae4cfaSBen Gardon 			goto retry;
113524ae4cfaSBen Gardon 		}
1136a6a0b05dSBen Gardon 		spte_set = true;
1137a6a0b05dSBen Gardon 	}
11387cca2d0bSBen Gardon 
11397cca2d0bSBen Gardon 	rcu_read_unlock();
1140a6a0b05dSBen Gardon 	return spte_set;
1141a6a0b05dSBen Gardon }
1142a6a0b05dSBen Gardon 
1143a6a0b05dSBen Gardon /*
1144a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1145a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1146a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1147a6a0b05dSBen Gardon  */
1148a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1149a6a0b05dSBen Gardon 			     int min_level)
1150a6a0b05dSBen Gardon {
1151a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1152a6a0b05dSBen Gardon 	bool spte_set = false;
1153a6a0b05dSBen Gardon 
115424ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
115524ae4cfaSBen Gardon 
115624ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1157a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1158a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1159a6a0b05dSBen Gardon 
1160a6a0b05dSBen Gardon 	return spte_set;
1161a6a0b05dSBen Gardon }
1162a6a0b05dSBen Gardon 
1163a6a0b05dSBen Gardon /*
1164a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1165a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1166a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1167a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1168a6a0b05dSBen Gardon  * be flushed.
1169a6a0b05dSBen Gardon  */
1170a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1171a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1172a6a0b05dSBen Gardon {
1173a6a0b05dSBen Gardon 	struct tdp_iter iter;
1174a6a0b05dSBen Gardon 	u64 new_spte;
1175a6a0b05dSBen Gardon 	bool spte_set = false;
1176a6a0b05dSBen Gardon 
11777cca2d0bSBen Gardon 	rcu_read_lock();
11787cca2d0bSBen Gardon 
1179a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
118024ae4cfaSBen Gardon retry:
118124ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
11821af4a960SBen Gardon 			continue;
11831af4a960SBen Gardon 
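		/*
		 * SPTEs that track dirtiness via write protection lose the
		 * writable bit; SPTEs with a hardware dirty bit simply have
		 * the dirty bit cleared.
		 */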
1184a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1185a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1186a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1187a6a0b05dSBen Gardon 			else
1188a6a0b05dSBen Gardon 				continue;
1189a6a0b05dSBen Gardon 		} else {
1190a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1191a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1192a6a0b05dSBen Gardon 			else
1193a6a0b05dSBen Gardon 				continue;
1194a6a0b05dSBen Gardon 		}
1195a6a0b05dSBen Gardon 
119624ae4cfaSBen Gardon 		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
119724ae4cfaSBen Gardon 							  new_spte)) {
119824ae4cfaSBen Gardon 			/*
119924ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
120024ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
120124ae4cfaSBen Gardon 			 */
120224ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
120324ae4cfaSBen Gardon 			goto retry;
120424ae4cfaSBen Gardon 		}
1205a6a0b05dSBen Gardon 		spte_set = true;
1206a6a0b05dSBen Gardon 	}
12077cca2d0bSBen Gardon 
12087cca2d0bSBen Gardon 	rcu_read_unlock();
1209a6a0b05dSBen Gardon 	return spte_set;
1210a6a0b05dSBen Gardon }
1211a6a0b05dSBen Gardon 
1212a6a0b05dSBen Gardon /*
1213a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1214a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1215a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1216a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1217a6a0b05dSBen Gardon  * be flushed.
1218a6a0b05dSBen Gardon  */
1219a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1220a6a0b05dSBen Gardon {
1221a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1222a6a0b05dSBen Gardon 	bool spte_set = false;
1223a6a0b05dSBen Gardon 
122424ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
122524ae4cfaSBen Gardon 
122624ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1227a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1228a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1229a6a0b05dSBen Gardon 
1230a6a0b05dSBen Gardon 	return spte_set;
1231a6a0b05dSBen Gardon }
1232a6a0b05dSBen Gardon 
1233a6a0b05dSBen Gardon /*
1234a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1235a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1236a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1237a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1238a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1239a6a0b05dSBen Gardon  */
1240a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1241a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1242a6a0b05dSBen Gardon {
1243a6a0b05dSBen Gardon 	struct tdp_iter iter;
1244a6a0b05dSBen Gardon 	u64 new_spte;
1245a6a0b05dSBen Gardon 
12467cca2d0bSBen Gardon 	rcu_read_lock();
12477cca2d0bSBen Gardon 
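	/*
	 * The mask covers at most BITS_PER_LONG GFNs starting at gfn; walk
	 * only the leaf SPTEs in that window, starting at the first GFN
	 * flagged in the mask.
	 */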
1248a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1249a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1250a6a0b05dSBen Gardon 		if (!mask)
1251a6a0b05dSBen Gardon 			break;
1252a6a0b05dSBen Gardon 
1253a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1254a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1255a6a0b05dSBen Gardon 			continue;
1256a6a0b05dSBen Gardon 
1257f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1258f1b3b06aSBen Gardon 
1259a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1260a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1261a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1262a6a0b05dSBen Gardon 			else
1263a6a0b05dSBen Gardon 				continue;
1264a6a0b05dSBen Gardon 		} else {
1265a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1266a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1267a6a0b05dSBen Gardon 			else
1268a6a0b05dSBen Gardon 				continue;
1269a6a0b05dSBen Gardon 		}
1270a6a0b05dSBen Gardon 
1271a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1272a6a0b05dSBen Gardon 	}
12737cca2d0bSBen Gardon 
12747cca2d0bSBen Gardon 	rcu_read_unlock();
1275a6a0b05dSBen Gardon }
1276a6a0b05dSBen Gardon 
1277a6a0b05dSBen Gardon /*
1278a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1279a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1280a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1281a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1282a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1283a6a0b05dSBen Gardon  */
1284a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1285a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1286a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1287a6a0b05dSBen Gardon 				       bool wrprot)
1288a6a0b05dSBen Gardon {
1289a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1290a6a0b05dSBen Gardon 
1291531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1292a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1293a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1294a6a0b05dSBen Gardon }
1295a6a0b05dSBen Gardon 
1296a6a0b05dSBen Gardon /*
129787aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
129887aa9ec9SBen Gardon  * GFNs within the slot.
129914881998SBen Gardon  */
1300af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
130114881998SBen Gardon 				       struct kvm_mmu_page *root,
13028ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1303af95b53eSSean Christopherson 				       bool flush)
130414881998SBen Gardon {
13059eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
13069eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
130714881998SBen Gardon 	struct tdp_iter iter;
130814881998SBen Gardon 	kvm_pfn_t pfn;
130914881998SBen Gardon 
13107cca2d0bSBen Gardon 	rcu_read_lock();
13117cca2d0bSBen Gardon 
131214881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
13132db6f772SBen Gardon retry:
13142db6f772SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
1315af95b53eSSean Christopherson 			flush = false;
13161af4a960SBen Gardon 			continue;
13171af4a960SBen Gardon 		}
13181af4a960SBen Gardon 
131914881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
132087aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
132114881998SBen Gardon 			continue;
132214881998SBen Gardon 
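		/*
		 * Skip SPTEs whose PFN is reserved and SPTEs that already map
		 * the largest page size allowed for this GFN; only mappings
		 * that could be collapsed into a larger page are zapped.
		 */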
132314881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
132414881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
13259eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
13269eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
132714881998SBen Gardon 			continue;
132814881998SBen Gardon 
13292db6f772SBen Gardon 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
13302db6f772SBen Gardon 			/*
13312db6f772SBen Gardon 			 * The iter must explicitly re-read the SPTE because
13322db6f772SBen Gardon 			 * the atomic cmpxchg failed.
13332db6f772SBen Gardon 			 */
13342db6f772SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
13352db6f772SBen Gardon 			goto retry;
13362db6f772SBen Gardon 		}
1337af95b53eSSean Christopherson 		flush = true;
133814881998SBen Gardon 	}
133914881998SBen Gardon 
13407cca2d0bSBen Gardon 	rcu_read_unlock();
1341af95b53eSSean Christopherson 
1342af95b53eSSean Christopherson 	return flush;
134314881998SBen Gardon }
134414881998SBen Gardon 
134514881998SBen Gardon /*
134614881998SBen Gardon  * Zap leaf entries which could be replaced by large mappings, for GFNs
134714881998SBen Gardon  * within the slot.
134814881998SBen Gardon  */
1349142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
13508ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
13518ca6f063SBen Gardon 				       bool flush)
135214881998SBen Gardon {
135314881998SBen Gardon 	struct kvm_mmu_page *root;
135414881998SBen Gardon 
13552db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
13562db6f772SBen Gardon 
13572db6f772SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1358af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1359af95b53eSSean Christopherson 
1360142ccde1SSean Christopherson 	return flush;
136114881998SBen Gardon }
136246044f72SBen Gardon 
136346044f72SBen Gardon /*
136446044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
13655fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
136646044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
136746044f72SBen Gardon  */
136846044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
136946044f72SBen Gardon 			      gfn_t gfn)
137046044f72SBen Gardon {
137146044f72SBen Gardon 	struct tdp_iter iter;
137246044f72SBen Gardon 	u64 new_spte;
137346044f72SBen Gardon 	bool spte_set = false;
137446044f72SBen Gardon 
13757cca2d0bSBen Gardon 	rcu_read_lock();
13767cca2d0bSBen Gardon 
137746044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
137846044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
137946044f72SBen Gardon 			break;
138046044f72SBen Gardon 
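		/*
		 * Clear MMU-writable in addition to the writable bit so that
		 * future writes to this GFN are guaranteed to be intercepted.
		 */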
138146044f72SBen Gardon 		new_spte = iter.old_spte &
13825fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
138346044f72SBen Gardon 
138446044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
138546044f72SBen Gardon 		spte_set = true;
138646044f72SBen Gardon 	}
138746044f72SBen Gardon 
13887cca2d0bSBen Gardon 	rcu_read_unlock();
13897cca2d0bSBen Gardon 
139046044f72SBen Gardon 	return spte_set;
139146044f72SBen Gardon }
139246044f72SBen Gardon 
139346044f72SBen Gardon /*
139446044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
13955fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
139646044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
139746044f72SBen Gardon  */
139846044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
139946044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
140046044f72SBen Gardon {
140146044f72SBen Gardon 	struct kvm_mmu_page *root;
140246044f72SBen Gardon 	bool spte_set = false;
140346044f72SBen Gardon 
1404531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1405a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
140646044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
1407a3f15bdaSSean Christopherson 
140846044f72SBen Gardon 	return spte_set;
140946044f72SBen Gardon }
141046044f72SBen Gardon 
141195fb5b02SBen Gardon /*
141295fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
141395fb5b02SBen Gardon  * That SPTE may be non-present.
141495fb5b02SBen Gardon  */
141539b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
141639b4d43eSSean Christopherson 			 int *root_level)
141795fb5b02SBen Gardon {
141895fb5b02SBen Gardon 	struct tdp_iter iter;
141995fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
142095fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
14212aa07893SSean Christopherson 	int leaf = -1;
142295fb5b02SBen Gardon 
142339b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
142495fb5b02SBen Gardon 
14257cca2d0bSBen Gardon 	rcu_read_lock();
14267cca2d0bSBen Gardon 
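	/*
	 * Record the SPTE at each level of the walk; the lowest level reached
	 * is returned as the leaf, and its SPTE may be non-present.
	 */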
142795fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
142895fb5b02SBen Gardon 		leaf = iter.level;
1429dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
143095fb5b02SBen Gardon 	}
143195fb5b02SBen Gardon 
14327cca2d0bSBen Gardon 	rcu_read_unlock();
14337cca2d0bSBen Gardon 
143495fb5b02SBen Gardon 	return leaf;
143595fb5b02SBen Gardon }
1436