xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 7b7e1ab6)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
1371ba3f31SPaolo Bonzini static bool __read_mostly tdp_mmu_enabled = true;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
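/*
 * Usage note: this registers "tdp_mmu" as a parameter of the kvm module, so
 * the TDP MMU can be turned off with kvm.tdp_mmu=N on the kernel command
 * line (or the equivalent modprobe option) and toggled at runtime through
 * /sys/module/kvm/parameters/tdp_mmu.  The value is sampled with READ_ONCE()
 * in kvm_mmu_init_tdp_mmu() below, so a runtime change only affects VMs
 * created afterwards.
 */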
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17d501f747SBen Gardon bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20d501f747SBen Gardon 		return false;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28d501f747SBen Gardon 
29d501f747SBen Gardon 	return true;
30fe5db27dSBen Gardon }
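/*
 * A minimal sketch of how the boolean result is expected to be consumed
 * (hypothetical call site; the real caller is kvm_mmu_init_vm() in mmu.c):
 *
 *	if (!kvm_mmu_init_tdp_mmu(kvm))
 *		... keep shadow-MMU-only bookkeeping for this VM ...
 *
 * i.e. "true" means the VM will use the TDP MMU, while "false" means TDP is
 * unavailable or the tdp_mmu module parameter was cleared and the legacy
 * shadow MMU must be used instead.
 */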
31fe5db27dSBen Gardon 
326103bc07SBen Gardon static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
336103bc07SBen Gardon 							     bool shared)
346103bc07SBen Gardon {
356103bc07SBen Gardon 	if (shared)
366103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
376103bc07SBen Gardon 	else
386103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
396103bc07SBen Gardon }
406103bc07SBen Gardon 
41fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42fe5db27dSBen Gardon {
43fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
44fe5db27dSBen Gardon 		return;
4502c00b3aSBen Gardon 
46524a1e4eSSean Christopherson 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
4702c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
487cca2d0bSBen Gardon 
497cca2d0bSBen Gardon 	/*
507cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
517cca2d0bSBen Gardon 	 * can run before the VM is torn down.
527cca2d0bSBen Gardon 	 */
537cca2d0bSBen Gardon 	rcu_barrier();
5402c00b3aSBen Gardon }
5502c00b3aSBen Gardon 
562bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
576103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
586103bc07SBen Gardon 			  bool shared);
592bdb3d84SBen Gardon 
602bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
61a889ea54SBen Gardon {
622bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
632bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
64a889ea54SBen Gardon }
65a889ea54SBen Gardon 
66c0e64238SBen Gardon /*
67c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
68c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
69c0e64238SBen Gardon  * the memory.
70c0e64238SBen Gardon  * Because TDP MMU page table memory is only accessed in an RCU read
71c0e64238SBen Gardon  * critical section, and is only freed after a grace period, lockless
72c0e64238SBen Gardon  * walkers are guaranteed not to use the memory after it is freed.
73c0e64238SBen Gardon  */
74c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
75a889ea54SBen Gardon {
76c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
77c0e64238SBen Gardon 					       rcu_head);
78a889ea54SBen Gardon 
79c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
80a889ea54SBen Gardon }
81a889ea54SBen Gardon 
826103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
836103bc07SBen Gardon 			  bool shared)
842bdb3d84SBen Gardon {
856103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
862bdb3d84SBen Gardon 
8711cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
882bdb3d84SBen Gardon 		return;
892bdb3d84SBen Gardon 
902bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
912bdb3d84SBen Gardon 
92c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93c0e64238SBen Gardon 	list_del_rcu(&root->link);
94c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
952bdb3d84SBen Gardon 
96524a1e4eSSean Christopherson 	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
972bdb3d84SBen Gardon 
98c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
99a889ea54SBen Gardon }
100a889ea54SBen Gardon 
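/*
 * Pairing sketch for kvm_tdp_mmu_put_root() above: the reference dropped
 * here is taken by kvm_tdp_mmu_get_root() (the refcount_inc_not_zero()
 * wrapper declared in tdp_mmu.h).  A hypothetical caller working outside
 * the iterator macros below would follow:
 *
 *	if (kvm_tdp_mmu_get_root(root)) {
 *		... read or modify the paging structure ...
 *		kvm_tdp_mmu_put_root(kvm, root, shared);
 *	}
 */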
101cfc10997SBen Gardon /*
102d62007edSSean Christopherson  * Returns the next root after @prev_root (or the first root if @prev_root is
103d62007edSSean Christopherson  * NULL).  A reference to the returned root is acquired, and the reference to
104d62007edSSean Christopherson  * @prev_root is released (the caller obviously must hold a reference to
105d62007edSSean Christopherson  * @prev_root if it's non-NULL).
106d62007edSSean Christopherson  *
107d62007edSSean Christopherson  * If @only_valid is true, invalid roots are skipped.
108d62007edSSean Christopherson  *
109d62007edSSean Christopherson  * Returns NULL if the end of tdp_mmu_roots was reached.
110cfc10997SBen Gardon  */
111cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
1126103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
113d62007edSSean Christopherson 					      bool shared, bool only_valid)
114a889ea54SBen Gardon {
115a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
116a889ea54SBen Gardon 
117c0e64238SBen Gardon 	rcu_read_lock();
118c0e64238SBen Gardon 
119cfc10997SBen Gardon 	if (prev_root)
120c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121c0e64238SBen Gardon 						  &prev_root->link,
122c0e64238SBen Gardon 						  typeof(*prev_root), link);
123cfc10997SBen Gardon 	else
124c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
125cfc10997SBen Gardon 						   typeof(*next_root), link);
126cfc10997SBen Gardon 
12704dc4e6cSSean Christopherson 	while (next_root) {
128d62007edSSean Christopherson 		if ((!only_valid || !next_root->role.invalid) &&
129ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(next_root))
13004dc4e6cSSean Christopherson 			break;
13104dc4e6cSSean Christopherson 
132c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
133c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
13404dc4e6cSSean Christopherson 	}
135fb101293SBen Gardon 
136c0e64238SBen Gardon 	rcu_read_unlock();
137cfc10997SBen Gardon 
138cfc10997SBen Gardon 	if (prev_root)
1396103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
140cfc10997SBen Gardon 
141a889ea54SBen Gardon 	return next_root;
142a889ea54SBen Gardon }
143a889ea54SBen Gardon 
144a889ea54SBen Gardon /*
145a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
146a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
147a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
148a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
1496103bc07SBen Gardon  *
1506103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
1516103bc07SBen Gardon  * mode. In the unlikely event that this thread must free a root, the lock
1526103bc07SBen Gardon  * will be temporarily dropped and reacquired in write mode.
153a889ea54SBen Gardon  */
154d62007edSSean Christopherson #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
155d62007edSSean Christopherson 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
156cfc10997SBen Gardon 	     _root;								\
157d62007edSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
158a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
159a3f15bdaSSean Christopherson 		} else
160a889ea54SBen Gardon 
161d62007edSSean Christopherson #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
162d62007edSSean Christopherson 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
163d62007edSSean Christopherson 
164d62007edSSean Christopherson #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)		\
165d62007edSSean Christopherson 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)
166d62007edSSean Christopherson 
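/*
 * Usage sketch for the yield-safe iterators above (hypothetical caller):
 * the macros take and release root references internally, so an early exit
 * must drop the reference on the root it stops at, per the comment above:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared) {
 *		if (done_with_this_walk) {
 *			kvm_tdp_mmu_put_root(kvm, root, shared);
 *			break;
 *		}
 *		... process root, possibly yielding the MMU lock ...
 *	}
 */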
167a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
168c0e64238SBen Gardon 	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
169c0e64238SBen Gardon 				lockdep_is_held_type(&(_kvm)->mmu_lock, 0) ||	\
170c0e64238SBen Gardon 				lockdep_is_held(&(_kvm)->arch.tdp_mmu_pages_lock))	\
171a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
172a3f15bdaSSean Christopherson 		} else
17302c00b3aSBen Gardon 
17402c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
17502c00b3aSBen Gardon 						   int level)
17602c00b3aSBen Gardon {
17702c00b3aSBen Gardon 	union kvm_mmu_page_role role;
17802c00b3aSBen Gardon 
17902c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
18002c00b3aSBen Gardon 	role.level = level;
18102c00b3aSBen Gardon 	role.direct = true;
182bb3b394dSLai Jiangshan 	role.has_4_byte_gpte = false;
18302c00b3aSBen Gardon 	role.access = ACC_ALL;
18487e888eaSPaolo Bonzini 	role.ad_disabled = !shadow_accessed_mask;
18502c00b3aSBen Gardon 
18602c00b3aSBen Gardon 	return role;
18702c00b3aSBen Gardon }
18802c00b3aSBen Gardon 
189c298a30cSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
19002c00b3aSBen Gardon 					     int level)
19102c00b3aSBen Gardon {
19202c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
19302c00b3aSBen Gardon 
19402c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
19502c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
19602c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
19702c00b3aSBen Gardon 
19802c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
19902c00b3aSBen Gardon 	sp->gfn = gfn;
20002c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
20102c00b3aSBen Gardon 
20233dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
20333dd3574SBen Gardon 
20402c00b3aSBen Gardon 	return sp;
20502c00b3aSBen Gardon }
20602c00b3aSBen Gardon 
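/*
 * Note on allocation context: both allocations above come from the per-vCPU
 * caches (mmu_page_header_cache and mmu_shadow_page_cache), which the fault
 * path is expected to have topped up via mmu_topup_memory_caches() before
 * taking the MMU lock, so this helper never sleeps.
 */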
2076e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
20802c00b3aSBen Gardon {
20902c00b3aSBen Gardon 	union kvm_mmu_page_role role;
21002c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
21102c00b3aSBen Gardon 	struct kvm_mmu_page *root;
21202c00b3aSBen Gardon 
2136e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
21402c00b3aSBen Gardon 
21502c00b3aSBen Gardon 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
21602c00b3aSBen Gardon 
21704dc4e6cSSean Christopherson 	/*
21804dc4e6cSSean Christopherson 	 * Check for an existing root before allocating a new one.  Note, the
21904dc4e6cSSean Christopherson 	 * role check prevents consuming an invalid root.
22004dc4e6cSSean Christopherson 	 */
221a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
222fb101293SBen Gardon 		if (root->role.word == role.word &&
223ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(root))
2246e6ec584SSean Christopherson 			goto out;
22502c00b3aSBen Gardon 	}
22602c00b3aSBen Gardon 
227c298a30cSDavid Matlack 	root = tdp_mmu_alloc_sp(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
22811cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
22902c00b3aSBen Gardon 
230c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
231c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
232c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
23302c00b3aSBen Gardon 
2346e6ec584SSean Christopherson out:
23502c00b3aSBen Gardon 	return __pa(root->spt);
236fe5db27dSBen Gardon }
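/*
 * Sketch of the expected consumer (hypothetical; the real call site is the
 * root-allocation path in mmu.c):
 *
 *	mmu->root_hpa = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
 *
 * The returned physical address is what gets loaded into EPTP/nCR3, and the
 * reference taken above is dropped via kvm_tdp_mmu_put_root() when the root
 * is eventually freed.
 */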
2372f2fad08SBen Gardon 
2382f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2399a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2409a77daacSBen Gardon 				bool shared);
2412f2fad08SBen Gardon 
242f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
243f8e14497SBen Gardon {
244f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
245f8e14497SBen Gardon 		return;
246f8e14497SBen Gardon 
247f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
24864bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
24964bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
250f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
251f8e14497SBen Gardon }
252f8e14497SBen Gardon 
253a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
254a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
255a6a0b05dSBen Gardon {
256a6a0b05dSBen Gardon 	bool pfn_changed;
257a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
258a6a0b05dSBen Gardon 
259a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
260a6a0b05dSBen Gardon 		return;
261a6a0b05dSBen Gardon 
262a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
263a6a0b05dSBen Gardon 
264a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
265a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
266a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
267fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
268a6a0b05dSBen Gardon 	}
269a6a0b05dSBen Gardon }
270a6a0b05dSBen Gardon 
2712f2fad08SBen Gardon /**
272c298a30cSDavid Matlack  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
273a9442f59SBen Gardon  *
274a9442f59SBen Gardon  * @kvm: kvm instance
275a9442f59SBen Gardon  * @sp: the page to be removed
2769a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2779a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2789a77daacSBen Gardon  *	    threads that might be adding or removing pages.
279a9442f59SBen Gardon  */
280c298a30cSDavid Matlack static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
2819a77daacSBen Gardon 			      bool shared)
282a9442f59SBen Gardon {
2839a77daacSBen Gardon 	if (shared)
2849a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2859a77daacSBen Gardon 	else
286a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
287a9442f59SBen Gardon 
288a9442f59SBen Gardon 	list_del(&sp->link);
289a9442f59SBen Gardon 	if (sp->lpage_disallowed)
290a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2919a77daacSBen Gardon 
2929a77daacSBen Gardon 	if (shared)
2939a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
294a9442f59SBen Gardon }
295a9442f59SBen Gardon 
296a9442f59SBen Gardon /**
2970f53dfa3SDavid Matlack  * handle_removed_pt() - handle a page table removed from the TDP structure
298a066e61fSBen Gardon  *
299a066e61fSBen Gardon  * @kvm: kvm instance
300a066e61fSBen Gardon  * @pt: the page removed from the paging structure
3019a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
3029a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
3039a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
304a066e61fSBen Gardon  *
305a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
306a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
30770fb3e41SBen Gardon  *
30870fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
30970fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
31070fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
31170fb3e41SBen Gardon  * early rcu_dereferences in the function.
312a066e61fSBen Gardon  */
3130f53dfa3SDavid Matlack static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
314a066e61fSBen Gardon {
31570fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
316a066e61fSBen Gardon 	int level = sp->role.level;
317e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
318a066e61fSBen Gardon 	int i;
319a066e61fSBen Gardon 
320a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
321a066e61fSBen Gardon 
322c298a30cSDavid Matlack 	tdp_mmu_unlink_sp(kvm, sp, shared);
323a066e61fSBen Gardon 
324a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
325574c3c55SBen Gardon 		u64 *sptep = rcu_dereference(pt) + i;
326574c3c55SBen Gardon 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
327574c3c55SBen Gardon 		u64 old_child_spte;
3289a77daacSBen Gardon 
3299a77daacSBen Gardon 		if (shared) {
330e25f0e0cSBen Gardon 			/*
331e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
332e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
333e25f0e0cSBen Gardon 			 * already marked as removed then another thread
334e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
335e25f0e0cSBen Gardon 			 * keep retrying until this thread transitions the
336e25f0e0cSBen Gardon 			 * SPTE from some other value to the removed SPTE value.
337e25f0e0cSBen Gardon 			 */
338e25f0e0cSBen Gardon 			for (;;) {
339e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
340e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
341e25f0e0cSBen Gardon 					break;
342e25f0e0cSBen Gardon 				cpu_relax();
343e25f0e0cSBen Gardon 			}
3449a77daacSBen Gardon 		} else {
3458df9f1afSSean Christopherson 			/*
3468df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3478df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3488df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3498df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3508df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3518df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3528df9f1afSSean Christopherson 			 * unreachable.
3538df9f1afSSean Christopherson 			 */
3549a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3558df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3568df9f1afSSean Christopherson 				continue;
357e25f0e0cSBen Gardon 
358e25f0e0cSBen Gardon 			/*
359e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
360e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
361e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
362e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
363e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
364e25f0e0cSBen Gardon 			 * the function.
365e25f0e0cSBen Gardon 			 */
366e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3679a77daacSBen Gardon 		}
368e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
369f1b83255SKai Huang 				    old_child_spte, REMOVED_SPTE, level,
370e25f0e0cSBen Gardon 				    shared);
371a066e61fSBen Gardon 	}
372a066e61fSBen Gardon 
373574c3c55SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
374f1b83255SKai Huang 					   KVM_PAGES_PER_HPAGE(level + 1));
375a066e61fSBen Gardon 
3767cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
377a066e61fSBen Gardon }
378a066e61fSBen Gardon 
379a066e61fSBen Gardon /**
3807f6231a3SKai Huang  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
3812f2fad08SBen Gardon  * @kvm: kvm instance
3822f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3832f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3842f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3852f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3862f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3879a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3889a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3899a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3902f2fad08SBen Gardon  *
3912f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3922f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3932f2fad08SBen Gardon  */
3942f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3959a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3969a77daacSBen Gardon 				  bool shared)
3972f2fad08SBen Gardon {
3982f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3992f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
4002f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
4012f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
4022f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4032f2fad08SBen Gardon 
4042f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4052f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
406764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4072f2fad08SBen Gardon 
4082f2fad08SBen Gardon 	/*
4092f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4102f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4112f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
412d9f6e12fSIngo Molnar 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4132f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4142f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4152f2fad08SBen Gardon 	 * thread before replacement.
4162f2fad08SBen Gardon 	 */
4172f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4182f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4192f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4202f2fad08SBen Gardon 		       "different PFN!\n"
4212f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4222f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4232f2fad08SBen Gardon 
4242f2fad08SBen Gardon 		/*
4252f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
426d9f6e12fSIngo Molnar 		 * corruption.
4272f2fad08SBen Gardon 		 */
4282f2fad08SBen Gardon 		BUG();
4292f2fad08SBen Gardon 	}
4302f2fad08SBen Gardon 
4312f2fad08SBen Gardon 	if (old_spte == new_spte)
4322f2fad08SBen Gardon 		return;
4332f2fad08SBen Gardon 
434b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
435b9a98c34SBen Gardon 
436115111efSDavid Matlack 	if (is_leaf)
437115111efSDavid Matlack 		check_spte_writable_invariants(new_spte);
438115111efSDavid Matlack 
4392f2fad08SBen Gardon 	/*
4402f2fad08SBen Gardon 	 * The only times a SPTE should be changed from a non-present to
4412f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4422f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4432f2fad08SBen Gardon 	 */
4442f2fad08SBen Gardon 	if (!was_present && !is_present) {
4452f2fad08SBen Gardon 		/*
44608f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
44708f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
44808f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
44908f07c80SBen Gardon 		 * are nonpresent.
4502f2fad08SBen Gardon 		 */
45108f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
45208f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
45308f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4542f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4552f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4562f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
45708f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
45808f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4592f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4602f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4612f2fad08SBen Gardon 		return;
4622f2fad08SBen Gardon 	}
4632f2fad08SBen Gardon 
46471f51d2cSMingwei Zhang 	if (is_leaf != was_leaf)
46571f51d2cSMingwei Zhang 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
4662f2fad08SBen Gardon 
4672f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
46864bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4692f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4702f2fad08SBen Gardon 
4712f2fad08SBen Gardon 	/*
4722f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4732f2fad08SBen Gardon 	 * the paging structure.
4742f2fad08SBen Gardon 	 */
475a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
4760f53dfa3SDavid Matlack 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
4772f2fad08SBen Gardon }
4782f2fad08SBen Gardon 
4792f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4809a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4819a77daacSBen Gardon 				bool shared)
4822f2fad08SBen Gardon {
4839a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4849a77daacSBen Gardon 			      shared);
485f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
486a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
487a6a0b05dSBen Gardon 				      new_spte, level);
4882f2fad08SBen Gardon }
489faaf05b0SBen Gardon 
490fe43fa2fSBen Gardon /*
4916ccf4438SPaolo Bonzini  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
4926ccf4438SPaolo Bonzini  * and handle the associated bookkeeping.  Do not mark the page dirty
49324ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
4949a77daacSBen Gardon  *
4953255530aSDavid Matlack  * If setting the SPTE fails because it has changed, iter->old_spte will be
4963255530aSDavid Matlack  * refreshed to the current value of the spte.
4973255530aSDavid Matlack  *
4989a77daacSBen Gardon  * @kvm: kvm instance
4999a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
5009a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
5013e72c791SDavid Matlack  * Return:
5023e72c791SDavid Matlack  * * 0      - If the SPTE was set.
5033e72c791SDavid Matlack  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
5043e72c791SDavid Matlack  *            no side-effects other than setting iter->old_spte to the last
5053e72c791SDavid Matlack  *            known value of the spte.
5069a77daacSBen Gardon  */
5073e72c791SDavid Matlack static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
5089a77daacSBen Gardon 					  struct tdp_iter *iter,
5099a77daacSBen Gardon 					  u64 new_spte)
5109a77daacSBen Gardon {
5113255530aSDavid Matlack 	u64 *sptep = rcu_dereference(iter->sptep);
5123255530aSDavid Matlack 	u64 old_spte;
5133255530aSDavid Matlack 
5143a0f64deSSean Christopherson 	WARN_ON_ONCE(iter->yielded);
5153a0f64deSSean Christopherson 
5169a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
5179a77daacSBen Gardon 
51808f07c80SBen Gardon 	/*
51908f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
52008f07c80SBen Gardon 	 * may modify it.
52108f07c80SBen Gardon 	 */
5227a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
5233e72c791SDavid Matlack 		return -EBUSY;
52408f07c80SBen Gardon 
5256e8eb206SDavid Matlack 	/*
5266e8eb206SDavid Matlack 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
5276e8eb206SDavid Matlack 	 * does not hold the mmu_lock.
5286e8eb206SDavid Matlack 	 */
5293255530aSDavid Matlack 	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
5303255530aSDavid Matlack 	if (old_spte != iter->old_spte) {
5313255530aSDavid Matlack 		/*
5323255530aSDavid Matlack 		 * The page table entry was modified by a different logical
5333255530aSDavid Matlack 		 * CPU. Refresh iter->old_spte with the current value so the
5343255530aSDavid Matlack 		 * caller operates on fresh data, e.g. if it retries
5353255530aSDavid Matlack 		 * tdp_mmu_set_spte_atomic().
5363255530aSDavid Matlack 		 */
5373255530aSDavid Matlack 		iter->old_spte = old_spte;
5383e72c791SDavid Matlack 		return -EBUSY;
5393255530aSDavid Matlack 	}
5409a77daacSBen Gardon 
54124ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
54208889894SSean Christopherson 			      new_spte, iter->level, true);
54324ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
5449a77daacSBen Gardon 
5453e72c791SDavid Matlack 	return 0;
5469a77daacSBen Gardon }
5479a77daacSBen Gardon 
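/*
 * Typical retry pattern for the atomic setter (sketch mirroring the callers
 * below, e.g. zap_gfn_range()):
 *
 *	if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
 *		// iter.old_spte was refreshed; recompute new_spte and retry
 *		goto retry;
 *	}
 */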
5483e72c791SDavid Matlack static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
54908f07c80SBen Gardon 					  struct tdp_iter *iter)
55008f07c80SBen Gardon {
5513e72c791SDavid Matlack 	int ret;
5523e72c791SDavid Matlack 
55308f07c80SBen Gardon 	/*
55408f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
55508f07c80SBen Gardon 	 * non-present value. This will stop other threads from
55608f07c80SBen Gardon 	 * immediately installing a present entry in its place
55708f07c80SBen Gardon 	 * before the TLBs are flushed.
55808f07c80SBen Gardon 	 */
5593e72c791SDavid Matlack 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
5603e72c791SDavid Matlack 	if (ret)
5613e72c791SDavid Matlack 		return ret;
56208f07c80SBen Gardon 
56308f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
56408f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
56508f07c80SBen Gardon 
56608f07c80SBen Gardon 	/*
56708f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
56808f07c80SBen Gardon 	 * must either wait on the MMU lock or use
569d9f6e12fSIngo Molnar 	 * tdp_mmu_set_spte_atomic which will not overwrite the
57008f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
57108f07c80SBen Gardon 	 * here since the SPTE is going from non-present
57208f07c80SBen Gardon 	 * to non-present.
57308f07c80SBen Gardon 	 */
57414f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
57508f07c80SBen Gardon 
5763e72c791SDavid Matlack 	return 0;
57708f07c80SBen Gardon }
57808f07c80SBen Gardon 
5799a77daacSBen Gardon 
5809a77daacSBen Gardon /*
581fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
582fe43fa2fSBen Gardon  * @kvm: kvm instance
583fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
584fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
585fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
586fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
587fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
588fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
589fe43fa2fSBen Gardon  *		      double counted.
590fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
591fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
592fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
593fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
594fe43fa2fSBen Gardon  *		      writes from being double counted.
595fe43fa2fSBen Gardon  */
596f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
597a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
598a6a0b05dSBen Gardon 				      bool record_dirty_log)
599faaf05b0SBen Gardon {
6003a0f64deSSean Christopherson 	WARN_ON_ONCE(iter->yielded);
6013a0f64deSSean Christopherson 
602531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
6033a9a4aa5SBen Gardon 
60408f07c80SBen Gardon 	/*
60508f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
60608f07c80SBen Gardon 	 * temporary removed SPTE value.
60708f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
60808f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
60908f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
61008f07c80SBen Gardon 	 */
6117a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
61208f07c80SBen Gardon 
6137cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
614faaf05b0SBen Gardon 
61508889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
61608889894SSean Christopherson 			      new_spte, iter->level, false);
617f8e14497SBen Gardon 	if (record_acc_track)
618f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
619f8e14497SBen Gardon 					      iter->level);
620a6a0b05dSBen Gardon 	if (record_dirty_log)
62108889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
622a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
623a6a0b05dSBen Gardon 					      iter->level);
624f8e14497SBen Gardon }
625f8e14497SBen Gardon 
626f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
627f8e14497SBen Gardon 				    u64 new_spte)
628f8e14497SBen Gardon {
629a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
630f8e14497SBen Gardon }
631f8e14497SBen Gardon 
632f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
633f8e14497SBen Gardon 						 struct tdp_iter *iter,
634f8e14497SBen Gardon 						 u64 new_spte)
635f8e14497SBen Gardon {
636a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
637a6a0b05dSBen Gardon }
638a6a0b05dSBen Gardon 
639a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
640a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
641a6a0b05dSBen Gardon 						 u64 new_spte)
642a6a0b05dSBen Gardon {
643a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
644faaf05b0SBen Gardon }
645faaf05b0SBen Gardon 
646faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
647faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
648faaf05b0SBen Gardon 
649f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
650f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
651f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
652f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
653f8e14497SBen Gardon 			continue;					\
654f8e14497SBen Gardon 		else
655f8e14497SBen Gardon 
656bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
657bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
658bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
659bb18842eSBen Gardon 
660faaf05b0SBen Gardon /*
661e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
662e28a436cSBen Gardon  * to the scheduler.
663e28a436cSBen Gardon  *
664e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
665e139a34eSBen Gardon  * TLB flush before yielding.
666e139a34eSBen Gardon  *
6673a0f64deSSean Christopherson  * If this function yields, iter->yielded is set and the caller must skip to
6683a0f64deSSean Christopherson  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
6693a0f64deSSean Christopherson  * over the paging structures to allow the iterator to continue its traversal
6703a0f64deSSean Christopherson  * from the paging structure root.
671e28a436cSBen Gardon  *
6723a0f64deSSean Christopherson  * Returns true if this function yielded.
673e28a436cSBen Gardon  */
6743a0f64deSSean Christopherson static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
6753a0f64deSSean Christopherson 							  struct tdp_iter *iter,
6763a0f64deSSean Christopherson 							  bool flush, bool shared)
677a6a0b05dSBen Gardon {
6783a0f64deSSean Christopherson 	WARN_ON(iter->yielded);
6793a0f64deSSean Christopherson 
680ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
681ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
682ed5e484bSBen Gardon 		return false;
683ed5e484bSBen Gardon 
684531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6857cca2d0bSBen Gardon 		rcu_read_unlock();
6867cca2d0bSBen Gardon 
687e139a34eSBen Gardon 		if (flush)
688e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
689e139a34eSBen Gardon 
6906103bc07SBen Gardon 		if (shared)
6916103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
6926103bc07SBen Gardon 		else
693531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
6946103bc07SBen Gardon 
6957cca2d0bSBen Gardon 		rcu_read_lock();
696ed5e484bSBen Gardon 
697ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
698ed5e484bSBen Gardon 
6993a0f64deSSean Christopherson 		iter->yielded = true;
700a6a0b05dSBen Gardon 	}
701e28a436cSBen Gardon 
7023a0f64deSSean Christopherson 	return iter->yielded;
703a6a0b05dSBen Gardon }
704a6a0b05dSBen Gardon 
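/*
 * Canonical use of the helper above (sketch mirroring zap_gfn_range() and
 * the other long-running walkers in this file):
 *
 *	tdp_root_for_each_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
 *			flush = false;	// any needed flush was done before yielding
 *			continue;	// iter.yielded makes tdp_iter_next() restart the walk
 *		}
 *		...
 *	}
 */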
705faaf05b0SBen Gardon /*
706faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
707faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
708faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
709faaf05b0SBen Gardon  * MMU lock.
7106103bc07SBen Gardon  *
711063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
712063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
713063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
714063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
7156103bc07SBen Gardon  * operation can cause a soft lockup.
7166103bc07SBen Gardon  *
7176103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7186103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7196103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7206103bc07SBen Gardon  * MMU lock in write mode.
721faaf05b0SBen Gardon  */
722faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
7236103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
7246103bc07SBen Gardon 			  bool shared)
725faaf05b0SBen Gardon {
726524a1e4eSSean Christopherson 	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
727524a1e4eSSean Christopherson 	bool zap_all = (start == 0 && end >= max_gfn_host);
728faaf05b0SBen Gardon 	struct tdp_iter iter;
729faaf05b0SBen Gardon 
730524a1e4eSSean Christopherson 	/*
7310103098fSSean Christopherson 	 * No need to try to step down in the iterator when zapping all SPTEs,
7320103098fSSean Christopherson 	 * as zapping the top-level non-leaf SPTEs will recurse on their children.
7330103098fSSean Christopherson 	 */
7340103098fSSean Christopherson 	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
7350103098fSSean Christopherson 
7360103098fSSean Christopherson 	/*
737524a1e4eSSean Christopherson 	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
738524a1e4eSSean Christopherson 	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
739524a1e4eSSean Christopherson 	 * and so KVM will never install a SPTE for such addresses.
740524a1e4eSSean Christopherson 	 */
741524a1e4eSSean Christopherson 	end = min(end, max_gfn_host);
742524a1e4eSSean Christopherson 
7436103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
7446103bc07SBen Gardon 
7457cca2d0bSBen Gardon 	rcu_read_lock();
7467cca2d0bSBen Gardon 
7470103098fSSean Christopherson 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
7480103098fSSean Christopherson 				   min_level, start, end) {
7496103bc07SBen Gardon retry:
7501af4a960SBen Gardon 		if (can_yield &&
7516103bc07SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
752a835429cSSean Christopherson 			flush = false;
7531af4a960SBen Gardon 			continue;
7541af4a960SBen Gardon 		}
7551af4a960SBen Gardon 
756faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
757faaf05b0SBen Gardon 			continue;
758faaf05b0SBen Gardon 
759faaf05b0SBen Gardon 		/*
760faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
761faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
762524a1e4eSSean Christopherson 		 * lower level, except when zapping all SPTEs.
763faaf05b0SBen Gardon 		 */
764524a1e4eSSean Christopherson 		if (!zap_all &&
765524a1e4eSSean Christopherson 		    (iter.gfn < start ||
766faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
767faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
768faaf05b0SBen Gardon 			continue;
769faaf05b0SBen Gardon 
7706103bc07SBen Gardon 		if (!shared) {
771faaf05b0SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, 0);
772a835429cSSean Christopherson 			flush = true;
7733e72c791SDavid Matlack 		} else if (tdp_mmu_zap_spte_atomic(kvm, &iter)) {
7746103bc07SBen Gardon 			goto retry;
7756103bc07SBen Gardon 		}
776faaf05b0SBen Gardon 	}
7777cca2d0bSBen Gardon 
7787cca2d0bSBen Gardon 	rcu_read_unlock();
779a835429cSSean Christopherson 	return flush;
780faaf05b0SBen Gardon }
781faaf05b0SBen Gardon 
782faaf05b0SBen Gardon /*
783faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
784faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
785faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
786faaf05b0SBen Gardon  * MMU lock.
787faaf05b0SBen Gardon  */
7882b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
7895a324c24SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
790faaf05b0SBen Gardon {
791faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
792faaf05b0SBen Gardon 
7935a324c24SSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
7946103bc07SBen Gardon 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
7955a324c24SSean Christopherson 				      false);
796faaf05b0SBen Gardon 
797faaf05b0SBen Gardon 	return flush;
798faaf05b0SBen Gardon }
799faaf05b0SBen Gardon 
800faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
801faaf05b0SBen Gardon {
8022b9663d8SSean Christopherson 	bool flush = false;
8032b9663d8SSean Christopherson 	int i;
804faaf05b0SBen Gardon 
8052b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
8065a324c24SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);
8072b9663d8SSean Christopherson 
808faaf05b0SBen Gardon 	if (flush)
809faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
810faaf05b0SBen Gardon }
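/*
 * Note: passing end = -1ull above covers every possible GFN.  zap_gfn_range()
 * recognizes this case (start == 0 && end >= max_gfn_host), zaps top-level
 * SPTEs without stepping down to their children, and clamps the walk at
 * host.MAXPHYADDR.
 */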
811bb18842eSBen Gardon 
8124c6654bdSBen Gardon static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
8134c6654bdSBen Gardon 						  struct kvm_mmu_page *prev_root)
8144c6654bdSBen Gardon {
8154c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8164c6654bdSBen Gardon 
8174c6654bdSBen Gardon 	if (prev_root)
8184c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8194c6654bdSBen Gardon 						  &prev_root->link,
8204c6654bdSBen Gardon 						  typeof(*prev_root), link);
8214c6654bdSBen Gardon 	else
8224c6654bdSBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8234c6654bdSBen Gardon 						   typeof(*next_root), link);
8244c6654bdSBen Gardon 
8254c6654bdSBen Gardon 	while (next_root && !(next_root->role.invalid &&
8264c6654bdSBen Gardon 			      refcount_read(&next_root->tdp_mmu_root_count)))
8274c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8284c6654bdSBen Gardon 						  &next_root->link,
8294c6654bdSBen Gardon 						  typeof(*next_root), link);
8304c6654bdSBen Gardon 
8314c6654bdSBen Gardon 	return next_root;
8324c6654bdSBen Gardon }
8334c6654bdSBen Gardon 
8344c6654bdSBen Gardon /*
8354c6654bdSBen Gardon  * Since kvm_tdp_mmu_invalidate_all_roots() has acquired a reference to each
8364c6654bdSBen Gardon  * invalidated root, they will not be freed until this function drops the
8374c6654bdSBen Gardon  * reference. Before dropping that reference, tear down the paging
8384c6654bdSBen Gardon  * structure so that whichever thread does drop the last reference
8394c6654bdSBen Gardon  * only has to do a trivial amount of work. Since the roots are invalid,
8404c6654bdSBen Gardon  * no new SPTEs should be created under them.
8414c6654bdSBen Gardon  */
8424c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
8434c6654bdSBen Gardon {
8444c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8454c6654bdSBen Gardon 	struct kvm_mmu_page *root;
8464c6654bdSBen Gardon 	bool flush = false;
8474c6654bdSBen Gardon 
8484c6654bdSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
8494c6654bdSBen Gardon 
8504c6654bdSBen Gardon 	rcu_read_lock();
8514c6654bdSBen Gardon 
8524c6654bdSBen Gardon 	root = next_invalidated_root(kvm, NULL);
8534c6654bdSBen Gardon 
8544c6654bdSBen Gardon 	while (root) {
8554c6654bdSBen Gardon 		next_root = next_invalidated_root(kvm, root);
8564c6654bdSBen Gardon 
8574c6654bdSBen Gardon 		rcu_read_unlock();
8584c6654bdSBen Gardon 
859524a1e4eSSean Christopherson 		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
8604c6654bdSBen Gardon 
8614c6654bdSBen Gardon 		/*
8624c6654bdSBen Gardon 		 * Put the reference acquired in
8634c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_all_roots().
8644c6654bdSBen Gardon 		 */
8654c6654bdSBen Gardon 		kvm_tdp_mmu_put_root(kvm, root, true);
8664c6654bdSBen Gardon 
8674c6654bdSBen Gardon 		root = next_root;
8684c6654bdSBen Gardon 
8694c6654bdSBen Gardon 		rcu_read_lock();
8704c6654bdSBen Gardon 	}
8714c6654bdSBen Gardon 
8724c6654bdSBen Gardon 	rcu_read_unlock();
8734c6654bdSBen Gardon 
8744c6654bdSBen Gardon 	if (flush)
8754c6654bdSBen Gardon 		kvm_flush_remote_tlbs(kvm);
8764c6654bdSBen Gardon }
8774c6654bdSBen Gardon 
878bb18842eSBen Gardon /*
879b7cccd39SBen Gardon  * Mark each TDP MMU root as invalid so that other threads
880b7cccd39SBen Gardon  * will drop their references and allow the root count to
881b7cccd39SBen Gardon  * go to 0.
882b7cccd39SBen Gardon  *
8834c6654bdSBen Gardon  * Also take a reference on all roots so that this thread
8844c6654bdSBen Gardon  * can do the bulk of the work required to free the roots
8854c6654bdSBen Gardon  * once they are invalidated. Without this reference, a
8864c6654bdSBen Gardon  * vCPU thread might drop the last reference to a root and
8874c6654bdSBen Gardon  * get stuck with tearing down the entire paging structure.
8884c6654bdSBen Gardon  *
8894c6654bdSBen Gardon  * Roots which have a zero refcount should be skipped as
8904c6654bdSBen Gardon  * they're already being torn down.
8914c6654bdSBen Gardon  * Already invalid roots should be referenced again so that
8924c6654bdSBen Gardon  * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots() is
8934c6654bdSBen Gardon  * done with them.
8944c6654bdSBen Gardon  *
895b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
896b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
897b7cccd39SBen Gardon  */
898b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
899b7cccd39SBen Gardon {
900b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
901b7cccd39SBen Gardon 
902b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
903b7cccd39SBen Gardon 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
9044c6654bdSBen Gardon 		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
905b7cccd39SBen Gardon 			root->role.invalid = true;
906b7cccd39SBen Gardon }
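/*
 * Sketch of how the two halves of the fast zap fit together (orchestrated
 * by kvm_mmu_zap_all_fast() in mmu.c):
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);	// mark invalid + take references
 *	write_unlock(&kvm->mmu_lock);
 *	...
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);	// zap and drop those references
 *	read_unlock(&kvm->mmu_lock);
 */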
907b7cccd39SBen Gardon 
908bb18842eSBen Gardon /*
909bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
910bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
911bb18842eSBen Gardon  */
912cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
913cdc47767SPaolo Bonzini 					  struct kvm_page_fault *fault,
914cdc47767SPaolo Bonzini 					  struct tdp_iter *iter)
915bb18842eSBen Gardon {
916c435d4b7SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
917bb18842eSBen Gardon 	u64 new_spte;
91857a3e96dSKai Huang 	int ret = RET_PF_FIXED;
919ad67e480SPaolo Bonzini 	bool wrprot = false;
920bb18842eSBen Gardon 
9217158bee4SPaolo Bonzini 	WARN_ON(sp->role.level != fault->goal_level);
922e710c5f6SDavid Matlack 	if (unlikely(!fault->slot))
923bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
9249a77daacSBen Gardon 	else
92553597858SDavid Matlack 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
9262839180cSPaolo Bonzini 					 fault->pfn, iter->old_spte, fault->prefetch, true,
9277158bee4SPaolo Bonzini 					 fault->map_writable, &new_spte);
928bb18842eSBen Gardon 
929bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
930bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
9313e72c791SDavid Matlack 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
9329a77daacSBen Gardon 		return RET_PF_RETRY;
933bb18842eSBen Gardon 
934bb18842eSBen Gardon 	/*
935bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
936bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
937bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
938bb18842eSBen Gardon 	 */
939ad67e480SPaolo Bonzini 	if (wrprot) {
940cdc47767SPaolo Bonzini 		if (fault->write)
941bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
942bb18842eSBen Gardon 	}
943bb18842eSBen Gardon 
944bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
9459a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
9469a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
9479a77daacSBen Gardon 				     new_spte);
948bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
9493849e092SSean Christopherson 	} else {
9509a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
9519a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
9523849e092SSean Christopherson 	}
953bb18842eSBen Gardon 
954857f8474SKai Huang 	/*
955857f8474SKai Huang 	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
956857f8474SKai Huang 	 * consistent with legacy MMU behavior.
957857f8474SKai Huang 	 */
958857f8474SKai Huang 	if (ret != RET_PF_SPURIOUS)
959bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
960bb18842eSBen Gardon 
961bb18842eSBen Gardon 	return ret;
962bb18842eSBen Gardon }
963bb18842eSBen Gardon 
964bb18842eSBen Gardon /*
965*7b7e1ab6SDavid Matlack  * tdp_mmu_link_sp_atomic - Atomically replace the given spte with an spte
966*7b7e1ab6SDavid Matlack  * pointing to the provided page table.
967*7b7e1ab6SDavid Matlack  *
968*7b7e1ab6SDavid Matlack  * @kvm: kvm instance
969*7b7e1ab6SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
970*7b7e1ab6SDavid Matlack  * @sp: The new TDP page table to install.
971*7b7e1ab6SDavid Matlack  * @account_nx: True if this page table is being installed to split a
972*7b7e1ab6SDavid Matlack  *              non-executable huge page.
973*7b7e1ab6SDavid Matlack  *
974*7b7e1ab6SDavid Matlack  * Returns: 0 if the new page table was installed. Non-0 if the page table
975*7b7e1ab6SDavid Matlack  *          could not be installed (e.g. the atomic compare-exchange failed).
976*7b7e1ab6SDavid Matlack  */
977*7b7e1ab6SDavid Matlack static int tdp_mmu_link_sp_atomic(struct kvm *kvm, struct tdp_iter *iter,
978*7b7e1ab6SDavid Matlack 				  struct kvm_mmu_page *sp, bool account_nx)
979*7b7e1ab6SDavid Matlack {
980*7b7e1ab6SDavid Matlack 	u64 spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask);
981*7b7e1ab6SDavid Matlack 	int ret;
982*7b7e1ab6SDavid Matlack 
983*7b7e1ab6SDavid Matlack 	ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
984*7b7e1ab6SDavid Matlack 	if (ret)
985*7b7e1ab6SDavid Matlack 		return ret;
986*7b7e1ab6SDavid Matlack 
987*7b7e1ab6SDavid Matlack 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
988*7b7e1ab6SDavid Matlack 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
989*7b7e1ab6SDavid Matlack 	if (account_nx)
990*7b7e1ab6SDavid Matlack 		account_huge_nx_page(kvm, sp);
991*7b7e1ab6SDavid Matlack 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
992*7b7e1ab6SDavid Matlack 
993*7b7e1ab6SDavid Matlack 	return 0;
994*7b7e1ab6SDavid Matlack }
995*7b7e1ab6SDavid Matlack 
996*7b7e1ab6SDavid Matlack /*
997bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
998bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
999bb18842eSBen Gardon  */
10002f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1001bb18842eSBen Gardon {
1002bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1003bb18842eSBen Gardon 	struct tdp_iter iter;
100489c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1005bb18842eSBen Gardon 	int ret;
1006bb18842eSBen Gardon 
100773a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
1008bb18842eSBen Gardon 
1009f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
10107cca2d0bSBen Gardon 
10117cca2d0bSBen Gardon 	rcu_read_lock();
10127cca2d0bSBen Gardon 
10132f6305ddSPaolo Bonzini 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
101473a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
1015536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1016bb18842eSBen Gardon 
101773a3c659SPaolo Bonzini 		if (iter.level == fault->goal_level)
1018bb18842eSBen Gardon 			break;
1019bb18842eSBen Gardon 
1020bb18842eSBen Gardon 		/*
1021bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1022bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1023bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1024bb18842eSBen Gardon 		 */
1025bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1026bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
10273e72c791SDavid Matlack 			if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
10289a77daacSBen Gardon 				break;
1029bb18842eSBen Gardon 
1030bb18842eSBen Gardon 			/*
1031bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1032bb18842eSBen Gardon 			 * because the new value informs the !present
1033bb18842eSBen Gardon 			 * path below.
1034bb18842eSBen Gardon 			 */
10357cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1036bb18842eSBen Gardon 		}
1037bb18842eSBen Gardon 
1038bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
1039*7b7e1ab6SDavid Matlack 			bool account_nx = fault->huge_page_disallowed &&
1040*7b7e1ab6SDavid Matlack 					  fault->req_level >= iter.level;
1041*7b7e1ab6SDavid Matlack 
1042ff76d506SKai Huang 			/*
1043c4342633SIngo Molnar 			 * If the SPTE has been frozen by another thread, just
1044ff76d506SKai Huang 			 * give up and retry, avoiding an unnecessary page table

1045ff76d506SKai Huang 			 * allocation and free.
1046ff76d506SKai Huang 			 */
1047ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1048ff76d506SKai Huang 				break;
1049ff76d506SKai Huang 
1050c298a30cSDavid Matlack 			sp = tdp_mmu_alloc_sp(vcpu, iter.gfn, iter.level - 1);
1051*7b7e1ab6SDavid Matlack 			if (tdp_mmu_link_sp_atomic(vcpu->kvm, &iter, sp, account_nx)) {
10529a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
10539a77daacSBen Gardon 				break;
10549a77daacSBen Gardon 			}
1055bb18842eSBen Gardon 		}
1056bb18842eSBen Gardon 	}
1057bb18842eSBen Gardon 
105873a3c659SPaolo Bonzini 	if (iter.level != fault->goal_level) {
10597cca2d0bSBen Gardon 		rcu_read_unlock();
1060bb18842eSBen Gardon 		return RET_PF_RETRY;
10617cca2d0bSBen Gardon 	}
1062bb18842eSBen Gardon 
1063cdc47767SPaolo Bonzini 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
10647cca2d0bSBen Gardon 	rcu_read_unlock();
1065bb18842eSBen Gardon 
1066bb18842eSBen Gardon 	return ret;
1067bb18842eSBen Gardon }
1068063afacdSBen Gardon 
10693039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
10703039bcc7SSean Christopherson 				 bool flush)
1071063afacdSBen Gardon {
107283b83a02SSean Christopherson 	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
107383b83a02SSean Christopherson 					   range->end, range->may_block, flush);
10743039bcc7SSean Christopherson }
10753039bcc7SSean Christopherson 
10763039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
10773039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
10783039bcc7SSean Christopherson 
10793039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
10803039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1081c1b91493SSean Christopherson 						   tdp_handler_t handler)
1082063afacdSBen Gardon {
1083063afacdSBen Gardon 	struct kvm_mmu_page *root;
10843039bcc7SSean Christopherson 	struct tdp_iter iter;
10853039bcc7SSean Christopherson 	bool ret = false;
1086063afacdSBen Gardon 
10873039bcc7SSean Christopherson 	rcu_read_lock();
1088063afacdSBen Gardon 
1089063afacdSBen Gardon 	/*
1090e1eed584SSean Christopherson 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1091e1eed584SSean Christopherson 	 * into this helper allow blocking, so it'd be dead, wasteful code.
1092063afacdSBen Gardon 	 */
10933039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
10943039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
10953039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
10963039bcc7SSean Christopherson 	}
1097063afacdSBen Gardon 
10983039bcc7SSean Christopherson 	rcu_read_unlock();
1099063afacdSBen Gardon 
1100063afacdSBen Gardon 	return ret;
1101063afacdSBen Gardon }
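
/*
 * A minimal sketch of how a new per-GFN hook would be wired through
 * kvm_tdp_mmu_handle_gfn(); the handler and wrapper names here are
 * hypothetical, and the existing handlers below follow the same shape.
 *
 *	static bool my_handler(struct kvm *kvm, struct tdp_iter *iter,
 *			       struct kvm_gfn_range *range)
 *	{
 *		return is_accessed_spte(iter->old_spte);
 *	}
 *
 *	bool kvm_tdp_mmu_my_hook(struct kvm *kvm, struct kvm_gfn_range *range)
 *	{
 *		return kvm_tdp_mmu_handle_gfn(kvm, range, my_handler);
 *	}
 */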
1102063afacdSBen Gardon 
1103f8e14497SBen Gardon /*
1104f8e14497SBen Gardon  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return
1105f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
1106f8e14497SBen Gardon  */
11073039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
11083039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1109f8e14497SBen Gardon {
1110f8e14497SBen Gardon 	u64 new_spte = 0;
1111f8e14497SBen Gardon 
11123039bcc7SSean Christopherson 	/* If we have a non-accessed entry, we don't need to change the pte. */
11133039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
11143039bcc7SSean Christopherson 		return false;
11157cca2d0bSBen Gardon 
11163039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1117f8e14497SBen Gardon 
1118f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
11198f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1120f8e14497SBen Gardon 	} else {
1121f8e14497SBen Gardon 		/*
1122f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1123f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1124f8e14497SBen Gardon 		 */
1125f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1126f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1127f8e14497SBen Gardon 
1128f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1129f8e14497SBen Gardon 	}
1130f8e14497SBen Gardon 
11313039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
113233dd3574SBen Gardon 
11333039bcc7SSean Christopherson 	return true;
1134f8e14497SBen Gardon }
1135f8e14497SBen Gardon 
11363039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1137f8e14497SBen Gardon {
11383039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1139f8e14497SBen Gardon }
1140f8e14497SBen Gardon 
11413039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
11423039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1143f8e14497SBen Gardon {
11443039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1145f8e14497SBen Gardon }
1146f8e14497SBen Gardon 
11473039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1148f8e14497SBen Gardon {
11493039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
11503039bcc7SSean Christopherson }
11513039bcc7SSean Christopherson 
11523039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
11533039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
11543039bcc7SSean Christopherson {
11553039bcc7SSean Christopherson 	u64 new_spte;
11563039bcc7SSean Christopherson 
11573039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
11583039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11593039bcc7SSean Christopherson 
11603039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
11613039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
11623039bcc7SSean Christopherson 		return false;
11633039bcc7SSean Christopherson 
11643039bcc7SSean Christopherson 	/*
11653039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
11663039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
11673039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
11683039bcc7SSean Christopherson 	 * See __handle_changed_spte().
11693039bcc7SSean Christopherson 	 */
11703039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
11713039bcc7SSean Christopherson 
11723039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
11733039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
11743039bcc7SSean Christopherson 								  pte_pfn(range->pte));
11753039bcc7SSean Christopherson 
11763039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
11773039bcc7SSean Christopherson 	}
11783039bcc7SSean Christopherson 
11793039bcc7SSean Christopherson 	return true;
1180f8e14497SBen Gardon }
11811d8dd6b3SBen Gardon 
11821d8dd6b3SBen Gardon /*
11831d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
11841d8dd6b3SBen Gardon  * range->pte holds the new pte_t mapping the HVA specified by the MMU
11851d8dd6b3SBen Gardon  * notifier.
11861d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
11871d8dd6b3SBen Gardon  */
11883039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
11891d8dd6b3SBen Gardon {
11903039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
11911d8dd6b3SBen Gardon 
11923039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
11933039bcc7SSean Christopherson 	if (flush)
11943039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
11957cca2d0bSBen Gardon 
11963039bcc7SSean Christopherson 	return false;
11971d8dd6b3SBen Gardon }
11981d8dd6b3SBen Gardon 
1199a6a0b05dSBen Gardon /*
1200bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1201bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1202bedd9195SDavid Matlack  * be flushed.
1203a6a0b05dSBen Gardon  */
1204a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1205a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1206a6a0b05dSBen Gardon {
1207a6a0b05dSBen Gardon 	struct tdp_iter iter;
1208a6a0b05dSBen Gardon 	u64 new_spte;
1209a6a0b05dSBen Gardon 	bool spte_set = false;
1210a6a0b05dSBen Gardon 
12117cca2d0bSBen Gardon 	rcu_read_lock();
12127cca2d0bSBen Gardon 
1213a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1214a6a0b05dSBen Gardon 
1215a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1216a6a0b05dSBen Gardon 				   min_level, start, end) {
121724ae4cfaSBen Gardon retry:
121824ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12191af4a960SBen Gardon 			continue;
12201af4a960SBen Gardon 
1221a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
12220f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
12230f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1224a6a0b05dSBen Gardon 			continue;
1225a6a0b05dSBen Gardon 
1226a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1227a6a0b05dSBen Gardon 
12283e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
122924ae4cfaSBen Gardon 			goto retry;
12303255530aSDavid Matlack 
1231a6a0b05dSBen Gardon 		spte_set = true;
1232a6a0b05dSBen Gardon 	}
12337cca2d0bSBen Gardon 
12347cca2d0bSBen Gardon 	rcu_read_unlock();
1235a6a0b05dSBen Gardon 	return spte_set;
1236a6a0b05dSBen Gardon }
1237a6a0b05dSBen Gardon 
1238a6a0b05dSBen Gardon /*
1239a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1240a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1241a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1242a6a0b05dSBen Gardon  */
1243269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1244269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1245a6a0b05dSBen Gardon {
1246a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1247a6a0b05dSBen Gardon 	bool spte_set = false;
1248a6a0b05dSBen Gardon 
124924ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1250a6a0b05dSBen Gardon 
1251d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1252a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1253a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1254a6a0b05dSBen Gardon 
1255a6a0b05dSBen Gardon 	return spte_set;
1256a6a0b05dSBen Gardon }
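
/*
 * A minimal usage sketch, assuming the caller wants to write-protect a slot
 * down to 4K (e.g. for dirty logging) and handles the flush itself; the real
 * callers in mmu.c coordinate locking and flushing differently.
 *
 *	read_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	read_unlock(&kvm->mmu_lock);
 */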
1257a6a0b05dSBen Gardon 
1258a6a0b05dSBen Gardon /*
1259a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1260a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1261a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1262a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1263a6a0b05dSBen Gardon  * be flushed.
1264a6a0b05dSBen Gardon  */
1265a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1266a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1267a6a0b05dSBen Gardon {
1268a6a0b05dSBen Gardon 	struct tdp_iter iter;
1269a6a0b05dSBen Gardon 	u64 new_spte;
1270a6a0b05dSBen Gardon 	bool spte_set = false;
1271a6a0b05dSBen Gardon 
12727cca2d0bSBen Gardon 	rcu_read_lock();
12737cca2d0bSBen Gardon 
1274a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
127524ae4cfaSBen Gardon retry:
127624ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12771af4a960SBen Gardon 			continue;
12781af4a960SBen Gardon 
1279a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1280a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1281a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1282a6a0b05dSBen Gardon 			else
1283a6a0b05dSBen Gardon 				continue;
1284a6a0b05dSBen Gardon 		} else {
1285a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1286a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1287a6a0b05dSBen Gardon 			else
1288a6a0b05dSBen Gardon 				continue;
1289a6a0b05dSBen Gardon 		}
1290a6a0b05dSBen Gardon 
12913e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
129224ae4cfaSBen Gardon 			goto retry;
12933255530aSDavid Matlack 
1294a6a0b05dSBen Gardon 		spte_set = true;
1295a6a0b05dSBen Gardon 	}
12967cca2d0bSBen Gardon 
12977cca2d0bSBen Gardon 	rcu_read_unlock();
1298a6a0b05dSBen Gardon 	return spte_set;
1299a6a0b05dSBen Gardon }
1300a6a0b05dSBen Gardon 
1301a6a0b05dSBen Gardon /*
1302a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1303a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1304a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1305a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1306a6a0b05dSBen Gardon  * be flushed.
1307a6a0b05dSBen Gardon  */
1308269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1309269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1310a6a0b05dSBen Gardon {
1311a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1312a6a0b05dSBen Gardon 	bool spte_set = false;
1313a6a0b05dSBen Gardon 
131424ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1315a6a0b05dSBen Gardon 
1316d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1317a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1318a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1319a6a0b05dSBen Gardon 
1320a6a0b05dSBen Gardon 	return spte_set;
1321a6a0b05dSBen Gardon }
1322a6a0b05dSBen Gardon 
1323a6a0b05dSBen Gardon /*
1324a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1325a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1326a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1327a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1328a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1329a6a0b05dSBen Gardon  */
1330a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1331a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1332a6a0b05dSBen Gardon {
1333a6a0b05dSBen Gardon 	struct tdp_iter iter;
1334a6a0b05dSBen Gardon 	u64 new_spte;
1335a6a0b05dSBen Gardon 
13367cca2d0bSBen Gardon 	rcu_read_lock();
13377cca2d0bSBen Gardon 
1338a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1339a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1340a6a0b05dSBen Gardon 		if (!mask)
1341a6a0b05dSBen Gardon 			break;
1342a6a0b05dSBen Gardon 
1343a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1344a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1345a6a0b05dSBen Gardon 			continue;
1346a6a0b05dSBen Gardon 
1347f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1348f1b3b06aSBen Gardon 
1349a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1350a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1351a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1352a6a0b05dSBen Gardon 			else
1353a6a0b05dSBen Gardon 				continue;
1354a6a0b05dSBen Gardon 		} else {
1355a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1356a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1357a6a0b05dSBen Gardon 			else
1358a6a0b05dSBen Gardon 				continue;
1359a6a0b05dSBen Gardon 		}
1360a6a0b05dSBen Gardon 
1361a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1362a6a0b05dSBen Gardon 	}
13637cca2d0bSBen Gardon 
13647cca2d0bSBen Gardon 	rcu_read_unlock();
1365a6a0b05dSBen Gardon }
1366a6a0b05dSBen Gardon 
1367a6a0b05dSBen Gardon /*
1368a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1369a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1370a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1371a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1372a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1373a6a0b05dSBen Gardon  */
1374a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1375a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1376a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1377a6a0b05dSBen Gardon 				       bool wrprot)
1378a6a0b05dSBen Gardon {
1379a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1380a6a0b05dSBen Gardon 
1381531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1382a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1383a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1384a6a0b05dSBen Gardon }
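
/*
 * A worked example of the gfn/mask contract (the values are made up): with
 * gfn == 0x1000 and mask == 0x9, bits 0 and 3 are set, so only the 4K SPTEs
 * mapping GFNs 0x1000 and 0x1003 have their dirty (or, when wrprot is true,
 * writable) bit cleared; the other GFNs in the 64-GFN window are skipped,
 * and the walk stops early once mask reaches zero.
 *
 *	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, 0x1000, 0x9, false);
 */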
1385a6a0b05dSBen Gardon 
1386a6a0b05dSBen Gardon /*
138787aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
138887aa9ec9SBen Gardon  * GFNs within the slot.
138914881998SBen Gardon  */
13904b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
139114881998SBen Gardon 				       struct kvm_mmu_page *root,
13924b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
139314881998SBen Gardon {
13949eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
13959eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
139614881998SBen Gardon 	struct tdp_iter iter;
139714881998SBen Gardon 	kvm_pfn_t pfn;
139814881998SBen Gardon 
13997cca2d0bSBen Gardon 	rcu_read_lock();
14007cca2d0bSBen Gardon 
140114881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
14022db6f772SBen Gardon retry:
14034b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
14041af4a960SBen Gardon 			continue;
14051af4a960SBen Gardon 
140614881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
140787aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
140814881998SBen Gardon 			continue;
140914881998SBen Gardon 
141014881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
141114881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
14129eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
14139eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
141414881998SBen Gardon 			continue;
141514881998SBen Gardon 
14164b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
14173e72c791SDavid Matlack 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
14182db6f772SBen Gardon 			goto retry;
14192db6f772SBen Gardon 	}
142014881998SBen Gardon 
14217cca2d0bSBen Gardon 	rcu_read_unlock();
142214881998SBen Gardon }
142314881998SBen Gardon 
142414881998SBen Gardon /*
142514881998SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
142614881998SBen Gardon  * GFNs within the slot.
142714881998SBen Gardon  */
14284b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
14294b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
143014881998SBen Gardon {
143114881998SBen Gardon 	struct kvm_mmu_page *root;
143214881998SBen Gardon 
14332db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
143414881998SBen Gardon 
1435d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
14364b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
143714881998SBen Gardon }
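
/*
 * A minimal usage sketch, assuming huge pages have just become possible
 * again for the slot (e.g. dirty logging was disabled). Holding mmu_lock
 * for read suffices because each zap goes through tdp_mmu_zap_spte_atomic().
 *
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
 *	read_unlock(&kvm->mmu_lock);
 */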
143846044f72SBen Gardon 
143946044f72SBen Gardon /*
144046044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14415fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
144246044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
144346044f72SBen Gardon  */
144446044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
14453ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
144646044f72SBen Gardon {
144746044f72SBen Gardon 	struct tdp_iter iter;
144846044f72SBen Gardon 	u64 new_spte;
144946044f72SBen Gardon 	bool spte_set = false;
145046044f72SBen Gardon 
14513ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
14523ad93562SKeqian Zhu 
14537cca2d0bSBen Gardon 	rcu_read_lock();
14547cca2d0bSBen Gardon 
14553ad93562SKeqian Zhu 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
14563ad93562SKeqian Zhu 				   min_level, gfn, gfn + 1) {
14573ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
14583ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
14593ad93562SKeqian Zhu 			continue;
14603ad93562SKeqian Zhu 
146146044f72SBen Gardon 		new_spte = iter.old_spte &
14625fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
146346044f72SBen Gardon 
14647c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
14657c8a4742SDavid Matlack 			break;
14667c8a4742SDavid Matlack 
146746044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
146846044f72SBen Gardon 		spte_set = true;
146946044f72SBen Gardon 	}
147046044f72SBen Gardon 
14717cca2d0bSBen Gardon 	rcu_read_unlock();
14727cca2d0bSBen Gardon 
147346044f72SBen Gardon 	return spte_set;
147446044f72SBen Gardon }
147546044f72SBen Gardon 
147646044f72SBen Gardon /*
147746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14785fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
147946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
148046044f72SBen Gardon  */
148146044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
14823ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
14833ad93562SKeqian Zhu 				   int min_level)
148446044f72SBen Gardon {
148546044f72SBen Gardon 	struct kvm_mmu_page *root;
148646044f72SBen Gardon 	bool spte_set = false;
148746044f72SBen Gardon 
1488531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1489a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
14903ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1491a3f15bdaSSean Christopherson 
149246044f72SBen Gardon 	return spte_set;
149346044f72SBen Gardon }
149446044f72SBen Gardon 
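
/*
 * A minimal usage sketch for write-protecting a single GFN; the function
 * asserts mmu_lock is held for write, and the caller flushes if any SPTE
 * was changed (kvm_flush_remote_tlbs_with_address() is used elsewhere in
 * this file for a single-page flush).
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
 *	write_unlock(&kvm->mmu_lock);
 */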
149595fb5b02SBen Gardon /*
149695fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
149795fb5b02SBen Gardon  * That SPTE may be non-present.
1498c5c8c7c5SDavid Matlack  *
1499c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
150095fb5b02SBen Gardon  */
150139b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
150239b4d43eSSean Christopherson 			 int *root_level)
150395fb5b02SBen Gardon {
150495fb5b02SBen Gardon 	struct tdp_iter iter;
150595fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
150695fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
15072aa07893SSean Christopherson 	int leaf = -1;
150895fb5b02SBen Gardon 
150939b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
151095fb5b02SBen Gardon 
151195fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
151295fb5b02SBen Gardon 		leaf = iter.level;
1513dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
151495fb5b02SBen Gardon 	}
151595fb5b02SBen Gardon 
151695fb5b02SBen Gardon 	return leaf;
151795fb5b02SBen Gardon }
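
/*
 * An illustrative caller sketch for kvm_tdp_mmu_get_walk(); the buffer size
 * and the leaf-to-root iteration mirror how the shadow-walk callers in
 * mmu.c consume the result, and the processing step is hypothetical.
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf, level;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *	if (leaf < 0)
 *		return;			// no SPTEs were walked
 *	for (level = root_level; level >= leaf; level--)
 *		inspect_spte(sptes[level], level);	// hypothetical helper
 */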
15186e8eb206SDavid Matlack 
15196e8eb206SDavid Matlack /*
15206e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
15216e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
15226e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
15236e8eb206SDavid Matlack  *
15246e8eb206SDavid Matlack  * Contract:
15256e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
15266e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
15276e8eb206SDavid Matlack  *
15286e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
15296e8eb206SDavid Matlack  */
15306e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
15316e8eb206SDavid Matlack 					u64 *spte)
15326e8eb206SDavid Matlack {
15336e8eb206SDavid Matlack 	struct tdp_iter iter;
15346e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
15356e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
15366e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
15376e8eb206SDavid Matlack 
15386e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
15396e8eb206SDavid Matlack 		*spte = iter.old_spte;
15406e8eb206SDavid Matlack 		sptep = iter.sptep;
15416e8eb206SDavid Matlack 	}
15426e8eb206SDavid Matlack 
15436e8eb206SDavid Matlack 	/*
15446e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
15456e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
15466e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
15476e8eb206SDavid Matlack 	 * annotation.
15486e8eb206SDavid Matlack 	 *
15496e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
15506e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
15516e8eb206SDavid Matlack 	 * outside of mmu_lock.
15526e8eb206SDavid Matlack 	 */
15536e8eb206SDavid Matlack 	return rcu_dereference(sptep);
15546e8eb206SDavid Matlack }
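
/*
 * A minimal sketch of the intended calling pattern; the fast page fault
 * logic is deliberately simplified and the fix-up step is only described.
 *
 *	u64 old_spte;
 *	u64 *sptep;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &old_spte);
 *	if (sptep && is_shadow_present_pte(old_spte)) {
 *		// fast_page_fault() would now try to fix the fault by
 *		// atomically updating *sptep (e.g. via cmpxchg) and
 *		// retrying if the SPTE changed underneath it.
 *	}
 *	kvm_tdp_mmu_walk_lockless_end();
 */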
1555