xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision c0e64238)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
30fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
31fe5db27dSBen Gardon {
32fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
33fe5db27dSBen Gardon 		return;
3402c00b3aSBen Gardon 
3502c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
367cca2d0bSBen Gardon 
377cca2d0bSBen Gardon 	/*
387cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
397cca2d0bSBen Gardon 	 * can run before the VM is torn down.
407cca2d0bSBen Gardon 	 */
417cca2d0bSBen Gardon 	rcu_barrier();
4202c00b3aSBen Gardon }
4302c00b3aSBen Gardon 
442bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
452bdb3d84SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush);
462bdb3d84SBen Gardon 
472bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
48a889ea54SBen Gardon {
492bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
502bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
512bdb3d84SBen Gardon }
522bdb3d84SBen Gardon 
53*c0e64238SBen Gardon /*
54*c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
55*c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
56*c0e64238SBen Gardon  * the memory.
57*c0e64238SBen Gardon  * By only accessing TDP MMU page table memory in an RCU read-side critical
58*c0e64238SBen Gardon  * section, and freeing it only after an RCU grace period, lockless walkers
59*c0e64238SBen Gardon  * are guaranteed not to use the memory after it is freed.
60*c0e64238SBen Gardon  */
61*c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
62*c0e64238SBen Gardon {
63*c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
64*c0e64238SBen Gardon 					       rcu_head);
65*c0e64238SBen Gardon 
66*c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
67*c0e64238SBen Gardon }
68*c0e64238SBen Gardon 
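/*
 * Usage sketch: the read side of this scheme is the pattern used by the
 * lockless walkers in this file (e.g. zap_gfn_range() and kvm_tdp_mmu_map()).
 * SPTE pointers are only dereferenced inside an RCU read-side critical
 * section, so any page table page a walker can still reach is guaranteed not
 * to be freed until the critical section ends:
 *
 *	rcu_read_lock();
 *	...
 *	old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 *	...
 *	rcu_read_unlock();
 *
 * Pages that were ever linked into the paging structure are only freed via
 * call_rcu(..., tdp_mmu_free_sp_rcu_callback), never by calling
 * tdp_mmu_free_sp() directly while other walkers may be traversing them.
 */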
692bdb3d84SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
702bdb3d84SBen Gardon {
712bdb3d84SBen Gardon 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
722bdb3d84SBen Gardon 
732bdb3d84SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
742bdb3d84SBen Gardon 
7511cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
762bdb3d84SBen Gardon 		return;
772bdb3d84SBen Gardon 
782bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
792bdb3d84SBen Gardon 
80*c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
81*c0e64238SBen Gardon 	list_del_rcu(&root->link);
82*c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
832bdb3d84SBen Gardon 
842bdb3d84SBen Gardon 	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
852bdb3d84SBen Gardon 
86*c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
87a889ea54SBen Gardon }
88a889ea54SBen Gardon 
89cfc10997SBen Gardon /*
90cfc10997SBen Gardon  * Finds the next valid root after prev_root (or the first valid root if
91cfc10997SBen Gardon  * prev_root is NULL), takes a reference on it, and returns that next root.
92cfc10997SBen Gardon  * If prev_root is not NULL, this thread should have already taken a
93cfc10997SBen Gardon  * reference on it, and that reference will be dropped. If no valid root is
94cfc10997SBen Gardon  * found, this function will return NULL.
95cfc10997SBen Gardon  */
96cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
97cfc10997SBen Gardon 					      struct kvm_mmu_page *prev_root)
98a889ea54SBen Gardon {
99a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
100a889ea54SBen Gardon 
101cfc10997SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
102cfc10997SBen Gardon 
103*c0e64238SBen Gardon 	rcu_read_lock();
104*c0e64238SBen Gardon 
105cfc10997SBen Gardon 	if (prev_root)
106*c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
107*c0e64238SBen Gardon 						  &prev_root->link,
108*c0e64238SBen Gardon 						  typeof(*prev_root), link);
109cfc10997SBen Gardon 	else
110*c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
111cfc10997SBen Gardon 						   typeof(*next_root), link);
112cfc10997SBen Gardon 
113*c0e64238SBen Gardon 	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
114*c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
115*c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
116fb101293SBen Gardon 
117*c0e64238SBen Gardon 	rcu_read_unlock();
118cfc10997SBen Gardon 
119cfc10997SBen Gardon 	if (prev_root)
120cfc10997SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root);
121cfc10997SBen Gardon 
122a889ea54SBen Gardon 	return next_root;
123a889ea54SBen Gardon }
124a889ea54SBen Gardon 
125a889ea54SBen Gardon /*
126a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
127a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
128a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
129a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
130a889ea54SBen Gardon  */
131a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
132cfc10997SBen Gardon 	for (_root = tdp_mmu_next_root(_kvm, NULL);		\
133cfc10997SBen Gardon 	     _root;						\
134a3f15bdaSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root))		\
135a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {	\
136a3f15bdaSSean Christopherson 		} else
137a889ea54SBen Gardon 
138a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
139*c0e64238SBen Gardon 	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
140*c0e64238SBen Gardon 				lockdep_is_held_type(&_kvm->mmu_lock, 0) ||	\
141*c0e64238SBen Gardon 				lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock))	\
142a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
143a3f15bdaSSean Christopherson 		} else
14402c00b3aSBen Gardon 
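/*
 * Usage sketch: the yield-safe variant is what memslot-wide operations in
 * this file use, e.g. kvm_tdp_mmu_wrprot_slot(). Because the iterator takes
 * and drops root references itself, the loop body is free to drop the MMU
 * lock and reschedule:
 *
 *	bool spte_set = false;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 *		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 *				slot->base_gfn + slot->npages, min_level);
 *
 * for_each_tdp_mmu_root() takes no extra references and so is only safe when
 * the caller holds the MMU lock (or tdp_mmu_pages_lock) for the entire walk.
 */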
14502c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
14602c00b3aSBen Gardon 						   int level)
14702c00b3aSBen Gardon {
14802c00b3aSBen Gardon 	union kvm_mmu_page_role role;
14902c00b3aSBen Gardon 
15002c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
15102c00b3aSBen Gardon 	role.level = level;
15202c00b3aSBen Gardon 	role.direct = true;
15302c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
15402c00b3aSBen Gardon 	role.access = ACC_ALL;
15502c00b3aSBen Gardon 
15602c00b3aSBen Gardon 	return role;
15702c00b3aSBen Gardon }
15802c00b3aSBen Gardon 
15902c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
16002c00b3aSBen Gardon 					       int level)
16102c00b3aSBen Gardon {
16202c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
16302c00b3aSBen Gardon 
16402c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
16502c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
16602c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
16702c00b3aSBen Gardon 
16802c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
16902c00b3aSBen Gardon 	sp->gfn = gfn;
17002c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
17102c00b3aSBen Gardon 
17233dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
17333dd3574SBen Gardon 
17402c00b3aSBen Gardon 	return sp;
17502c00b3aSBen Gardon }
17602c00b3aSBen Gardon 
1776e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
17802c00b3aSBen Gardon {
17902c00b3aSBen Gardon 	union kvm_mmu_page_role role;
18002c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
18102c00b3aSBen Gardon 	struct kvm_mmu_page *root;
18202c00b3aSBen Gardon 
1836e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
18402c00b3aSBen Gardon 
1856e6ec584SSean Christopherson 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
18602c00b3aSBen Gardon 
18702c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
188a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
189fb101293SBen Gardon 		if (root->role.word == role.word &&
190fb101293SBen Gardon 		    kvm_tdp_mmu_get_root(kvm, root))
1916e6ec584SSean Christopherson 			goto out;
19202c00b3aSBen Gardon 	}
19302c00b3aSBen Gardon 
19402c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
19511cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
19602c00b3aSBen Gardon 
197*c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
198*c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
199*c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
20002c00b3aSBen Gardon 
2016e6ec584SSean Christopherson out:
20202c00b3aSBen Gardon 	return __pa(root->spt);
203fe5db27dSBen Gardon }
2042f2fad08SBen Gardon 
2052f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2069a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2079a77daacSBen Gardon 				bool shared);
2082f2fad08SBen Gardon 
209f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
210f8e14497SBen Gardon {
211f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
212f8e14497SBen Gardon 		return;
213f8e14497SBen Gardon 
214f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
21564bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
21664bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
217f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
218f8e14497SBen Gardon }
219f8e14497SBen Gardon 
220a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
221a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
222a6a0b05dSBen Gardon {
223a6a0b05dSBen Gardon 	bool pfn_changed;
224a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
225a6a0b05dSBen Gardon 
226a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
227a6a0b05dSBen Gardon 		return;
228a6a0b05dSBen Gardon 
229a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
230a6a0b05dSBen Gardon 
231a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
232a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
233a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
234fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
235a6a0b05dSBen Gardon 	}
236a6a0b05dSBen Gardon }
237a6a0b05dSBen Gardon 
2382f2fad08SBen Gardon /**
239a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
240a9442f59SBen Gardon  *
241a9442f59SBen Gardon  * @kvm: kvm instance
242a9442f59SBen Gardon  * @sp: the new page
2439a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2449a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2459a77daacSBen Gardon  *	    threads that might be adding or removing pages.
246a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
247a9442f59SBen Gardon  *		eventual reclaim.
248a9442f59SBen Gardon  */
249a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2509a77daacSBen Gardon 			      bool shared, bool account_nx)
251a9442f59SBen Gardon {
2529a77daacSBen Gardon 	if (shared)
2539a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2549a77daacSBen Gardon 	else
255a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
256a9442f59SBen Gardon 
257a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
258a9442f59SBen Gardon 	if (account_nx)
259a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2609a77daacSBen Gardon 
2619a77daacSBen Gardon 	if (shared)
2629a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
263a9442f59SBen Gardon }
264a9442f59SBen Gardon 
265a9442f59SBen Gardon /**
266a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
267a9442f59SBen Gardon  *
268a9442f59SBen Gardon  * @kvm: kvm instance
269a9442f59SBen Gardon  * @sp: the page to be removed
2709a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2719a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2729a77daacSBen Gardon  *	    threads that might be adding or removing pages.
273a9442f59SBen Gardon  */
2749a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2759a77daacSBen Gardon 				bool shared)
276a9442f59SBen Gardon {
2779a77daacSBen Gardon 	if (shared)
2789a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2799a77daacSBen Gardon 	else
280a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
281a9442f59SBen Gardon 
282a9442f59SBen Gardon 	list_del(&sp->link);
283a9442f59SBen Gardon 	if (sp->lpage_disallowed)
284a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2859a77daacSBen Gardon 
2869a77daacSBen Gardon 	if (shared)
2879a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
288a9442f59SBen Gardon }
289a9442f59SBen Gardon 
290a9442f59SBen Gardon /**
291a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
292a066e61fSBen Gardon  *
293a066e61fSBen Gardon  * @kvm: kvm instance
294a066e61fSBen Gardon  * @pt: the page removed from the paging structure
2959a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
2969a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
2979a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
298a066e61fSBen Gardon  *
299a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
300a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
30170fb3e41SBen Gardon  *
30270fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
30370fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
30470fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
30570fb3e41SBen Gardon  * early rcu_dereferences in the function.
306a066e61fSBen Gardon  */
30770fb3e41SBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
3089a77daacSBen Gardon 					bool shared)
309a066e61fSBen Gardon {
31070fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
311a066e61fSBen Gardon 	int level = sp->role.level;
312e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
313a066e61fSBen Gardon 	u64 old_child_spte;
3149a77daacSBen Gardon 	u64 *sptep;
315e25f0e0cSBen Gardon 	gfn_t gfn;
316a066e61fSBen Gardon 	int i;
317a066e61fSBen Gardon 
318a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
319a066e61fSBen Gardon 
3209a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
321a066e61fSBen Gardon 
322a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
32370fb3e41SBen Gardon 		sptep = rcu_dereference(pt) + i;
324e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3259a77daacSBen Gardon 
3269a77daacSBen Gardon 		if (shared) {
327e25f0e0cSBen Gardon 			/*
328e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
329e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
330e25f0e0cSBen Gardon 			 * already marked as removed then another thread
331e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so keep
332e25f0e0cSBen Gardon 			 * retrying the exchange until the SPTE transitions from
333e25f0e0cSBen Gardon 			 * some other value to the removed SPTE value.
334e25f0e0cSBen Gardon 			 */
335e25f0e0cSBen Gardon 			for (;;) {
336e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
337e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
338e25f0e0cSBen Gardon 					break;
339e25f0e0cSBen Gardon 				cpu_relax();
340e25f0e0cSBen Gardon 			}
3419a77daacSBen Gardon 		} else {
3428df9f1afSSean Christopherson 			/*
3438df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3448df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3458df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3468df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3478df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3488df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3498df9f1afSSean Christopherson 			 * unreachable.
3508df9f1afSSean Christopherson 			 */
3519a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3528df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3538df9f1afSSean Christopherson 				continue;
354e25f0e0cSBen Gardon 
355e25f0e0cSBen Gardon 			/*
356e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
357e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
358e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
359e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
360e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
361e25f0e0cSBen Gardon 			 * the function.
362e25f0e0cSBen Gardon 			 */
363e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3649a77daacSBen Gardon 		}
365e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
366e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
367e25f0e0cSBen Gardon 				    shared);
368a066e61fSBen Gardon 	}
369a066e61fSBen Gardon 
370a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
371a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
372a066e61fSBen Gardon 
3737cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
374a066e61fSBen Gardon }
375a066e61fSBen Gardon 
376a066e61fSBen Gardon /**
3772f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3782f2fad08SBen Gardon  * @kvm: kvm instance
3792f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3802f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3812f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3822f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3832f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3849a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3859a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3869a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3872f2fad08SBen Gardon  *
3882f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3892f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3902f2fad08SBen Gardon  */
3912f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3929a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3939a77daacSBen Gardon 				  bool shared)
3942f2fad08SBen Gardon {
3952f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3962f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
3972f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
3982f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
3992f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4002f2fad08SBen Gardon 
4012f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4022f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
403764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4042f2fad08SBen Gardon 
4052f2fad08SBen Gardon 	/*
4062f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4072f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4082f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
4092f2fad08SBen Gardon 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4102f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4112f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4122f2fad08SBen Gardon 	 * thread before replacement.
4132f2fad08SBen Gardon 	 */
4142f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4152f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4162f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4172f2fad08SBen Gardon 		       "different PFN!\n"
4182f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4192f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4202f2fad08SBen Gardon 
4212f2fad08SBen Gardon 		/*
4222f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4232f2fad08SBen Gardon 		 * corruption.
4242f2fad08SBen Gardon 		 */
4252f2fad08SBen Gardon 		BUG();
4262f2fad08SBen Gardon 	}
4272f2fad08SBen Gardon 
4282f2fad08SBen Gardon 	if (old_spte == new_spte)
4292f2fad08SBen Gardon 		return;
4302f2fad08SBen Gardon 
431b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
432b9a98c34SBen Gardon 
4332f2fad08SBen Gardon 	/*
4342f2fad08SBen Gardon 	 * The only time a SPTE should be changed from a non-present to a
4352f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4362f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4372f2fad08SBen Gardon 	 */
4382f2fad08SBen Gardon 	if (!was_present && !is_present) {
4392f2fad08SBen Gardon 		/*
44008f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
44108f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
44208f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
44308f07c80SBen Gardon 		 * are nonpresent.
4442f2fad08SBen Gardon 		 */
44508f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
44608f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
44708f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4482f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4492f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4502f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
45108f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
45208f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4532f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4542f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4552f2fad08SBen Gardon 		return;
4562f2fad08SBen Gardon 	}
4572f2fad08SBen Gardon 
4582f2fad08SBen Gardon 
4592f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
46064bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4612f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4622f2fad08SBen Gardon 
4632f2fad08SBen Gardon 	/*
4642f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4652f2fad08SBen Gardon 	 * the paging structure.
4662f2fad08SBen Gardon 	 */
467a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
468a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4699a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4702f2fad08SBen Gardon }
4712f2fad08SBen Gardon 
4722f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4739a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4749a77daacSBen Gardon 				bool shared)
4752f2fad08SBen Gardon {
4769a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4779a77daacSBen Gardon 			      shared);
478f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
479a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
480a6a0b05dSBen Gardon 				      new_spte, level);
4812f2fad08SBen Gardon }
482faaf05b0SBen Gardon 
483fe43fa2fSBen Gardon /*
4849a77daacSBen Gardon  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
4859a77daacSBen Gardon  * associated bookkeeping
4869a77daacSBen Gardon  *
4879a77daacSBen Gardon  * @kvm: kvm instance
4889a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
4899a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
4909a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
4919a77daacSBen Gardon  *	    this function will have no side-effects.
4929a77daacSBen Gardon  */
4939a77daacSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
4949a77daacSBen Gardon 					   struct tdp_iter *iter,
4959a77daacSBen Gardon 					   u64 new_spte)
4969a77daacSBen Gardon {
4979a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
4989a77daacSBen Gardon 
49908f07c80SBen Gardon 	/*
50008f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
50108f07c80SBen Gardon 	 * may modify it.
50208f07c80SBen Gardon 	 */
5037a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
50408f07c80SBen Gardon 		return false;
50508f07c80SBen Gardon 
5069a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
5079a77daacSBen Gardon 		      new_spte) != iter->old_spte)
5089a77daacSBen Gardon 		return false;
5099a77daacSBen Gardon 
51008889894SSean Christopherson 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
51108889894SSean Christopherson 			    new_spte, iter->level, true);
5129a77daacSBen Gardon 
5139a77daacSBen Gardon 	return true;
5149a77daacSBen Gardon }
5159a77daacSBen Gardon 
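/*
 * Usage sketch: because tdp_mmu_set_spte_atomic() has no side effects when it
 * returns false, callers running under the MMU lock in read mode simply back
 * off and retry the fault, as tdp_mmu_map_handle_target_level() does below:
 *
 *	if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
 *		return RET_PF_RETRY;
 *
 * The cmpxchg64() against iter->old_spte is what detects a racing update; the
 * losing thread re-walks the paging structure and observes the new value.
 */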
51608f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
51708f07c80SBen Gardon 					   struct tdp_iter *iter)
51808f07c80SBen Gardon {
51908f07c80SBen Gardon 	/*
52008f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
52108f07c80SBen Gardon 	 * non-present value. This will stop other threads from
52208f07c80SBen Gardon 	 * immediately installing a present entry in its place
52308f07c80SBen Gardon 	 * before the TLBs are flushed.
52408f07c80SBen Gardon 	 */
52508f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
52608f07c80SBen Gardon 		return false;
52708f07c80SBen Gardon 
52808f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
52908f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
53008f07c80SBen Gardon 
53108f07c80SBen Gardon 	/*
53208f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
53308f07c80SBen Gardon 	 * must either wait on the MMU lock or use
53408f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overwrite the
53508f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
53608f07c80SBen Gardon 	 * here since the SPTE is going from non-present
53708f07c80SBen Gardon 	 * to non-present.
53808f07c80SBen Gardon 	 */
53914f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
54008f07c80SBen Gardon 
54108f07c80SBen Gardon 	return true;
54208f07c80SBen Gardon }
54308f07c80SBen Gardon 
5449a77daacSBen Gardon 
5459a77daacSBen Gardon /*
546fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
547fe43fa2fSBen Gardon  * @kvm: kvm instance
548fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
549fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
550fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
551fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
552fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
553fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
554fe43fa2fSBen Gardon  *		      double counted.
555fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
556fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
557fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
558fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
559fe43fa2fSBen Gardon  *		      writes from being double counted.
560fe43fa2fSBen Gardon  */
561f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
562a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
563a6a0b05dSBen Gardon 				      bool record_dirty_log)
564faaf05b0SBen Gardon {
565531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5663a9a4aa5SBen Gardon 
56708f07c80SBen Gardon 	/*
56808f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
56908f07c80SBen Gardon 	 * temporary removed SPTE value.
57008f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
57108f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
57208f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
57308f07c80SBen Gardon 	 */
5747a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
57508f07c80SBen Gardon 
5767cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
577faaf05b0SBen Gardon 
57808889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
57908889894SSean Christopherson 			      new_spte, iter->level, false);
580f8e14497SBen Gardon 	if (record_acc_track)
581f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
582f8e14497SBen Gardon 					      iter->level);
583a6a0b05dSBen Gardon 	if (record_dirty_log)
58408889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
585a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
586a6a0b05dSBen Gardon 					      iter->level);
587f8e14497SBen Gardon }
588f8e14497SBen Gardon 
589f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
590f8e14497SBen Gardon 				    u64 new_spte)
591f8e14497SBen Gardon {
592a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
593f8e14497SBen Gardon }
594f8e14497SBen Gardon 
595f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
596f8e14497SBen Gardon 						 struct tdp_iter *iter,
597f8e14497SBen Gardon 						 u64 new_spte)
598f8e14497SBen Gardon {
599a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
600a6a0b05dSBen Gardon }
601a6a0b05dSBen Gardon 
602a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
603a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
604a6a0b05dSBen Gardon 						 u64 new_spte)
605a6a0b05dSBen Gardon {
606a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
607faaf05b0SBen Gardon }
608faaf05b0SBen Gardon 
609faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
610faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
611faaf05b0SBen Gardon 
612f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
613f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
614f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
615f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
616f8e14497SBen Gardon 			continue;					\
617f8e14497SBen Gardon 		else
618f8e14497SBen Gardon 
619bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
620bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
621bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
622bb18842eSBen Gardon 
623faaf05b0SBen Gardon /*
624e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
625e28a436cSBen Gardon  * to the scheduler.
626e28a436cSBen Gardon  *
627e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
628e139a34eSBen Gardon  * TLB flush before yielding.
629e139a34eSBen Gardon  *
630e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
631ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
632ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
633ed5e484bSBen Gardon  * paging structure root.
634e28a436cSBen Gardon  *
635e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
636e28a436cSBen Gardon  * Return false if a yield was not needed.
637e28a436cSBen Gardon  */
638e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
639e139a34eSBen Gardon 					     struct tdp_iter *iter, bool flush)
640a6a0b05dSBen Gardon {
641ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
642ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
643ed5e484bSBen Gardon 		return false;
644ed5e484bSBen Gardon 
645531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6467cca2d0bSBen Gardon 		rcu_read_unlock();
6477cca2d0bSBen Gardon 
648e139a34eSBen Gardon 		if (flush)
649e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
650e139a34eSBen Gardon 
651531810caSBen Gardon 		cond_resched_rwlock_write(&kvm->mmu_lock);
6527cca2d0bSBen Gardon 		rcu_read_lock();
653ed5e484bSBen Gardon 
654ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
655ed5e484bSBen Gardon 
656b601c3bcSBen Gardon 		tdp_iter_restart(iter);
657ed5e484bSBen Gardon 
658e28a436cSBen Gardon 		return true;
659a6a0b05dSBen Gardon 	}
660e28a436cSBen Gardon 
661e28a436cSBen Gardon 	return false;
662a6a0b05dSBen Gardon }
663a6a0b05dSBen Gardon 
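/*
 * Usage sketch: long-running walks combine the iteration macros above with
 * tdp_mmu_iter_cond_resched() and skip straight to the next iteration after a
 * yield, since the yield dropped the RCU read lock and reset the iterator.
 * zap_gfn_range() below follows this shape:
 *
 *	rcu_read_lock();
 *	tdp_root_for_each_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
 *			flush = false;
 *			continue;
 *		}
 *		... examine or modify iter.old_spte ...
 *	}
 *	rcu_read_unlock();
 */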
664faaf05b0SBen Gardon /*
665faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
666faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
667faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
668faaf05b0SBen Gardon  * MMU lock.
669063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
670063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
671063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
672063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
673a835429cSSean Christopherson  * operation can cause a soft lockup.  Note, in some use cases a flush may be
674a835429cSSean Christopherson  * required by prior actions.  Ensure the pending flush is performed prior to
675a835429cSSean Christopherson  * yielding.
676faaf05b0SBen Gardon  */
677faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
678a835429cSSean Christopherson 			  gfn_t start, gfn_t end, bool can_yield, bool flush)
679faaf05b0SBen Gardon {
680faaf05b0SBen Gardon 	struct tdp_iter iter;
681faaf05b0SBen Gardon 
6827cca2d0bSBen Gardon 	rcu_read_lock();
6837cca2d0bSBen Gardon 
684faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
6851af4a960SBen Gardon 		if (can_yield &&
686a835429cSSean Christopherson 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
687a835429cSSean Christopherson 			flush = false;
6881af4a960SBen Gardon 			continue;
6891af4a960SBen Gardon 		}
6901af4a960SBen Gardon 
691faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
692faaf05b0SBen Gardon 			continue;
693faaf05b0SBen Gardon 
694faaf05b0SBen Gardon 		/*
695faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
696faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
697faaf05b0SBen Gardon 		 * lower level.
698faaf05b0SBen Gardon 		 */
699faaf05b0SBen Gardon 		if ((iter.gfn < start ||
700faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
701faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
702faaf05b0SBen Gardon 			continue;
703faaf05b0SBen Gardon 
704faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
705a835429cSSean Christopherson 		flush = true;
706faaf05b0SBen Gardon 	}
7077cca2d0bSBen Gardon 
7087cca2d0bSBen Gardon 	rcu_read_unlock();
709a835429cSSean Christopherson 	return flush;
710faaf05b0SBen Gardon }
711faaf05b0SBen Gardon 
712faaf05b0SBen Gardon /*
713faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
714faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
715faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
716faaf05b0SBen Gardon  * MMU lock.
717faaf05b0SBen Gardon  */
7182b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
7192b9663d8SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
720faaf05b0SBen Gardon {
721faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
722faaf05b0SBen Gardon 
723a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
72433a31641SSean Christopherson 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
725faaf05b0SBen Gardon 
726faaf05b0SBen Gardon 	return flush;
727faaf05b0SBen Gardon }
728faaf05b0SBen Gardon 
729faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
730faaf05b0SBen Gardon {
731339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
7322b9663d8SSean Christopherson 	bool flush = false;
7332b9663d8SSean Christopherson 	int i;
734faaf05b0SBen Gardon 
7352b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
7362b9663d8SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
7372b9663d8SSean Christopherson 
738faaf05b0SBen Gardon 	if (flush)
739faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
740faaf05b0SBen Gardon }
741bb18842eSBen Gardon 
742bb18842eSBen Gardon /*
743bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
744bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
745bb18842eSBen Gardon  */
746bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
747bb18842eSBen Gardon 					  int map_writable,
748bb18842eSBen Gardon 					  struct tdp_iter *iter,
749bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
750bb18842eSBen Gardon {
751bb18842eSBen Gardon 	u64 new_spte;
752bb18842eSBen Gardon 	int ret = 0;
753bb18842eSBen Gardon 	int make_spte_ret = 0;
754bb18842eSBen Gardon 
7559a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
756bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
7579a77daacSBen Gardon 	else
758bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
759bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
760bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
761bb18842eSBen Gardon 					 &new_spte);
762bb18842eSBen Gardon 
763bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
764bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
7659a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
7669a77daacSBen Gardon 		return RET_PF_RETRY;
767bb18842eSBen Gardon 
768bb18842eSBen Gardon 	/*
769bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
770bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
771bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
772bb18842eSBen Gardon 	 */
773bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
774bb18842eSBen Gardon 		if (write)
775bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
776bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
777bb18842eSBen Gardon 	}
778bb18842eSBen Gardon 
779bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
7809a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
7819a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
7829a77daacSBen Gardon 				     new_spte);
783bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
7843849e092SSean Christopherson 	} else {
7859a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
7869a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
7873849e092SSean Christopherson 	}
788bb18842eSBen Gardon 
789bb18842eSBen Gardon 	if (!prefault)
790bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
791bb18842eSBen Gardon 
792bb18842eSBen Gardon 	return ret;
793bb18842eSBen Gardon }
794bb18842eSBen Gardon 
795bb18842eSBen Gardon /*
796bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
797bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
798bb18842eSBen Gardon  */
799bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
800bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
801bb18842eSBen Gardon 		    bool prefault)
802bb18842eSBen Gardon {
803bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
804bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
805bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
806bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
807bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
808bb18842eSBen Gardon 	struct tdp_iter iter;
80989c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
810bb18842eSBen Gardon 	u64 *child_pt;
811bb18842eSBen Gardon 	u64 new_spte;
812bb18842eSBen Gardon 	int ret;
813bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
814bb18842eSBen Gardon 	int level;
815bb18842eSBen Gardon 	int req_level;
816bb18842eSBen Gardon 
817bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
818bb18842eSBen Gardon 		return RET_PF_RETRY;
819bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
820bb18842eSBen Gardon 		return RET_PF_RETRY;
821bb18842eSBen Gardon 
822bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
823bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
824bb18842eSBen Gardon 
825bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
8267cca2d0bSBen Gardon 
8277cca2d0bSBen Gardon 	rcu_read_lock();
8287cca2d0bSBen Gardon 
829bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
830bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
831bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
832bb18842eSBen Gardon 						   iter.level, &pfn, &level);
833bb18842eSBen Gardon 
834bb18842eSBen Gardon 		if (iter.level == level)
835bb18842eSBen Gardon 			break;
836bb18842eSBen Gardon 
837bb18842eSBen Gardon 		/*
838bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
839bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
840bb18842eSBen Gardon 		 * with a non-leaf SPTE.
841bb18842eSBen Gardon 		 */
842bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
843bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
84408f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
8459a77daacSBen Gardon 				break;
846bb18842eSBen Gardon 
847bb18842eSBen Gardon 			/*
848bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
849bb18842eSBen Gardon 			 * because the new value informs the !present
850bb18842eSBen Gardon 			 * path below.
851bb18842eSBen Gardon 			 */
8527cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
853bb18842eSBen Gardon 		}
854bb18842eSBen Gardon 
855bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
85689c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
85789c0fd49SBen Gardon 			child_pt = sp->spt;
858a9442f59SBen Gardon 
859bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
860bb18842eSBen Gardon 						     !shadow_accessed_mask);
861bb18842eSBen Gardon 
8629a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
8639a77daacSBen Gardon 						    new_spte)) {
8649a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
8659a77daacSBen Gardon 						  huge_page_disallowed &&
8669a77daacSBen Gardon 						  req_level >= iter.level);
8679a77daacSBen Gardon 
868bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
8699a77daacSBen Gardon 			} else {
8709a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
8719a77daacSBen Gardon 				break;
8729a77daacSBen Gardon 			}
873bb18842eSBen Gardon 		}
874bb18842eSBen Gardon 	}
875bb18842eSBen Gardon 
8769a77daacSBen Gardon 	if (iter.level != level) {
8777cca2d0bSBen Gardon 		rcu_read_unlock();
878bb18842eSBen Gardon 		return RET_PF_RETRY;
8797cca2d0bSBen Gardon 	}
880bb18842eSBen Gardon 
881bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
882bb18842eSBen Gardon 					      pfn, prefault);
8837cca2d0bSBen Gardon 	rcu_read_unlock();
884bb18842eSBen Gardon 
885bb18842eSBen Gardon 	return ret;
886bb18842eSBen Gardon }
887063afacdSBen Gardon 
8883039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
8893039bcc7SSean Christopherson 				 bool flush)
8903039bcc7SSean Christopherson {
8913039bcc7SSean Christopherson 	struct kvm_mmu_page *root;
892c1b91493SSean Christopherson 
8933039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
8943039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
895e1eed584SSean Christopherson 				       range->may_block, flush);
8963039bcc7SSean Christopherson 
8973039bcc7SSean Christopherson 	return flush;
8983039bcc7SSean Christopherson }
8993039bcc7SSean Christopherson 
9003039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
9013039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
9023039bcc7SSean Christopherson 
9033039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
9043039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
905c1b91493SSean Christopherson 						   tdp_handler_t handler)
906063afacdSBen Gardon {
907063afacdSBen Gardon 	struct kvm_mmu_page *root;
9083039bcc7SSean Christopherson 	struct tdp_iter iter;
9093039bcc7SSean Christopherson 	bool ret = false;
910063afacdSBen Gardon 
9113039bcc7SSean Christopherson 	rcu_read_lock();
912063afacdSBen Gardon 
913e1eed584SSean Christopherson 	/*
914e1eed584SSean Christopherson 	 * Don't support rescheduling, none of the MMU notifiers that funnel
915e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
916e1eed584SSean Christopherson 	 */
9173039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
9183039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
9193039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
9203039bcc7SSean Christopherson 	}
921063afacdSBen Gardon 
9223039bcc7SSean Christopherson 	rcu_read_unlock();
923063afacdSBen Gardon 
924063afacdSBen Gardon 	return ret;
925063afacdSBen Gardon }
926063afacdSBen Gardon 
927f8e14497SBen Gardon /*
928f8e14497SBen Gardon  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
929f8e14497SBen Gardon  * if any of the GFNs in the range have been accessed.
930f8e14497SBen Gardon  */
9313039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
9323039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
933f8e14497SBen Gardon {
9343039bcc7SSean Christopherson 	u64 new_spte = 0;
935f8e14497SBen Gardon 
9363039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
9373039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
9383039bcc7SSean Christopherson 		return false;
9397cca2d0bSBen Gardon 
9403039bcc7SSean Christopherson 	new_spte = iter->old_spte;
941f8e14497SBen Gardon 
942f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
9438f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
944f8e14497SBen Gardon 	} else {
945f8e14497SBen Gardon 		/*
946f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
947f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
948f8e14497SBen Gardon 		 */
949f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
950f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
951f8e14497SBen Gardon 
952f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
953f8e14497SBen Gardon 	}
954f8e14497SBen Gardon 
9553039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
9563039bcc7SSean Christopherson 
9573039bcc7SSean Christopherson 	return true;
958f8e14497SBen Gardon }
959f8e14497SBen Gardon 
9603039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
961f8e14497SBen Gardon {
9623039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
963f8e14497SBen Gardon }
964f8e14497SBen Gardon 
9653039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
9663039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
967f8e14497SBen Gardon {
9683039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
969f8e14497SBen Gardon }
970f8e14497SBen Gardon 
9713039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
972f8e14497SBen Gardon {
9733039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
9743039bcc7SSean Christopherson }
9753039bcc7SSean Christopherson 
9763039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
9773039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
9783039bcc7SSean Christopherson {
9793039bcc7SSean Christopherson 	u64 new_spte;
9803039bcc7SSean Christopherson 
9813039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
9823039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
9833039bcc7SSean Christopherson 
9843039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
9853039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
9863039bcc7SSean Christopherson 		return false;
9873039bcc7SSean Christopherson 
9883039bcc7SSean Christopherson 	/*
9893039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
9903039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
9913039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
9923039bcc7SSean Christopherson 	 * See __handle_changed_spte().
9933039bcc7SSean Christopherson 	 */
9943039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
9953039bcc7SSean Christopherson 
9963039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
9973039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
9983039bcc7SSean Christopherson 								  pte_pfn(range->pte));
9993039bcc7SSean Christopherson 
10003039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
10013039bcc7SSean Christopherson 	}
10023039bcc7SSean Christopherson 
10033039bcc7SSean Christopherson 	return true;
1004f8e14497SBen Gardon }
10051d8dd6b3SBen Gardon 
10061d8dd6b3SBen Gardon /*
10071d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
10081d8dd6b3SBen Gardon  * range->pte holds the new pte_t mapping the HVA specified by the MMU
10091d8dd6b3SBen Gardon  * notifier.
10101d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
10111d8dd6b3SBen Gardon  */
10123039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
10131d8dd6b3SBen Gardon {
10143039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
10151d8dd6b3SBen Gardon 
10163039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
10173039bcc7SSean Christopherson 	if (flush)
10183039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
10197cca2d0bSBen Gardon 
10203039bcc7SSean Christopherson 	return false;
10211d8dd6b3SBen Gardon }
10221d8dd6b3SBen Gardon 
1023a6a0b05dSBen Gardon /*
1024a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs [start, end). Only
1025a6a0b05dSBen Gardon  * leaf SPTEs at or above min_level are write-protected.
1026a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1027a6a0b05dSBen Gardon  */
1028a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1029a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1030a6a0b05dSBen Gardon {
1031a6a0b05dSBen Gardon 	struct tdp_iter iter;
1032a6a0b05dSBen Gardon 	u64 new_spte;
1033a6a0b05dSBen Gardon 	bool spte_set = false;
1034a6a0b05dSBen Gardon 
10357cca2d0bSBen Gardon 	rcu_read_lock();
10367cca2d0bSBen Gardon 
1037a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1038a6a0b05dSBen Gardon 
1039a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1040a6a0b05dSBen Gardon 				   min_level, start, end) {
10411af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10421af4a960SBen Gardon 			continue;
10431af4a960SBen Gardon 
1044a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
10450f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
10460f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1047a6a0b05dSBen Gardon 			continue;
1048a6a0b05dSBen Gardon 
1049a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1050a6a0b05dSBen Gardon 
1051a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1052a6a0b05dSBen Gardon 		spte_set = true;
1053a6a0b05dSBen Gardon 	}
10547cca2d0bSBen Gardon 
10557cca2d0bSBen Gardon 	rcu_read_unlock();
1056a6a0b05dSBen Gardon 	return spte_set;
1057a6a0b05dSBen Gardon }
1058a6a0b05dSBen Gardon 
1059a6a0b05dSBen Gardon /*
1060a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1061a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1062a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1063a6a0b05dSBen Gardon  */
1064a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1065a6a0b05dSBen Gardon 			     int min_level)
1066a6a0b05dSBen Gardon {
1067a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1068a6a0b05dSBen Gardon 	bool spte_set = false;
1069a6a0b05dSBen Gardon 
1070a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1071a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1072a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1073a6a0b05dSBen Gardon 
1074a6a0b05dSBen Gardon 	return spte_set;
1075a6a0b05dSBen Gardon }
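
/*
 * Hypothetical usage sketch (not part of the upstream file): the typical
 * caller pattern for kvm_tdp_mmu_wrprot_slot() -- write-protect a memslot
 * under mmu_lock and pay for a remote TLB flush only if an SPTE actually
 * changed. The function name is illustrative; PG_LEVEL_4K write-protects
 * every leaf SPTE in the slot.
 */
static void example_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
	write_unlock(&kvm->mmu_lock);

	/* Only issue a remote TLB flush if an SPTE was actually changed. */
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}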
1076a6a0b05dSBen Gardon 
1077a6a0b05dSBen Gardon /*
1078a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1079a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1080a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1081a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1082a6a0b05dSBen Gardon  * be flushed.
1083a6a0b05dSBen Gardon  */
1084a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1085a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1086a6a0b05dSBen Gardon {
1087a6a0b05dSBen Gardon 	struct tdp_iter iter;
1088a6a0b05dSBen Gardon 	u64 new_spte;
1089a6a0b05dSBen Gardon 	bool spte_set = false;
1090a6a0b05dSBen Gardon 
10917cca2d0bSBen Gardon 	rcu_read_lock();
10927cca2d0bSBen Gardon 
1093a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
10941af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10951af4a960SBen Gardon 			continue;
10961af4a960SBen Gardon 
1097a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1098a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1099a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1100a6a0b05dSBen Gardon 			else
1101a6a0b05dSBen Gardon 				continue;
1102a6a0b05dSBen Gardon 		} else {
1103a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1104a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1105a6a0b05dSBen Gardon 			else
1106a6a0b05dSBen Gardon 				continue;
1107a6a0b05dSBen Gardon 		}
1108a6a0b05dSBen Gardon 
1109a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1110a6a0b05dSBen Gardon 		spte_set = true;
1111a6a0b05dSBen Gardon 	}
11127cca2d0bSBen Gardon 
11137cca2d0bSBen Gardon 	rcu_read_unlock();
1114a6a0b05dSBen Gardon 	return spte_set;
1115a6a0b05dSBen Gardon }
1116a6a0b05dSBen Gardon 
1117a6a0b05dSBen Gardon /*
1118a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1119a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1120a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1121a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1122a6a0b05dSBen Gardon  * be flushed.
1123a6a0b05dSBen Gardon  */
1124a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1125a6a0b05dSBen Gardon {
1126a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1127a6a0b05dSBen Gardon 	bool spte_set = false;
1128a6a0b05dSBen Gardon 
1129a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1130a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1131a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1132a6a0b05dSBen Gardon 
1133a6a0b05dSBen Gardon 	return spte_set;
1134a6a0b05dSBen Gardon }
1135a6a0b05dSBen Gardon 
1136a6a0b05dSBen Gardon /*
1137a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1138a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1139a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1140a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1141a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1142a6a0b05dSBen Gardon  */
1143a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1144a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1145a6a0b05dSBen Gardon {
1146a6a0b05dSBen Gardon 	struct tdp_iter iter;
1147a6a0b05dSBen Gardon 	u64 new_spte;
1148a6a0b05dSBen Gardon 
11497cca2d0bSBen Gardon 	rcu_read_lock();
11507cca2d0bSBen Gardon 
1151a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1152a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1153a6a0b05dSBen Gardon 		if (!mask)
1154a6a0b05dSBen Gardon 			break;
1155a6a0b05dSBen Gardon 
1156a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1157a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1158a6a0b05dSBen Gardon 			continue;
1159a6a0b05dSBen Gardon 
1160f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1161f1b3b06aSBen Gardon 
1162a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1163a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1164a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1165a6a0b05dSBen Gardon 			else
1166a6a0b05dSBen Gardon 				continue;
1167a6a0b05dSBen Gardon 		} else {
1168a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1169a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1170a6a0b05dSBen Gardon 			else
1171a6a0b05dSBen Gardon 				continue;
1172a6a0b05dSBen Gardon 		}
1173a6a0b05dSBen Gardon 
1174a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1175a6a0b05dSBen Gardon 	}
11767cca2d0bSBen Gardon 
11777cca2d0bSBen Gardon 	rcu_read_unlock();
1178a6a0b05dSBen Gardon }
1179a6a0b05dSBen Gardon 
1180a6a0b05dSBen Gardon /*
1181a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1182a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1183a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1184a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1185a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1186a6a0b05dSBen Gardon  */
1187a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1188a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1189a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1190a6a0b05dSBen Gardon 				       bool wrprot)
1191a6a0b05dSBen Gardon {
1192a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1193a6a0b05dSBen Gardon 
1194531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1195a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1196a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1197a6a0b05dSBen Gardon }
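
/*
 * Illustrative sketch (hypothetical helper, not in the upstream file): in
 * the dirty-log mask handled above, bit i of @mask corresponds to GFN
 * (gfn + i), which is why clear_dirty_pt_masked() only walks the range
 * [gfn + __ffs(mask), gfn + BITS_PER_LONG) and clears mask bits as it goes.
 */
static void example_dump_dirty_mask(gfn_t gfn, unsigned long mask)
{
	unsigned long bit;

	/* Each set bit names one dirty 4k page within the 64-GFN window. */
	for_each_set_bit(bit, &mask, BITS_PER_LONG)
		pr_debug("dirty GFN 0x%llx\n", gfn + bit);
}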
1198a6a0b05dSBen Gardon 
1199a6a0b05dSBen Gardon /*
120087aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
120187aa9ec9SBen Gardon  * GFNs within the slot.
120214881998SBen Gardon  */
1203af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
120414881998SBen Gardon 				       struct kvm_mmu_page *root,
12058ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1206af95b53eSSean Christopherson 				       bool flush)
120714881998SBen Gardon {
12089eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
12099eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
121014881998SBen Gardon 	struct tdp_iter iter;
121114881998SBen Gardon 	kvm_pfn_t pfn;
121214881998SBen Gardon 
12137cca2d0bSBen Gardon 	rcu_read_lock();
12147cca2d0bSBen Gardon 
121514881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
1216af95b53eSSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
1217af95b53eSSean Christopherson 			flush = false;
12181af4a960SBen Gardon 			continue;
12191af4a960SBen Gardon 		}
12201af4a960SBen Gardon 
122114881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
122287aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
122314881998SBen Gardon 			continue;
122414881998SBen Gardon 
122514881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
122614881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
12279eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
12289eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
122914881998SBen Gardon 			continue;
123014881998SBen Gardon 
123114881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
123214881998SBen Gardon 
1233af95b53eSSean Christopherson 		flush = true;
123414881998SBen Gardon 	}
123514881998SBen Gardon 
12367cca2d0bSBen Gardon 	rcu_read_unlock();
1237af95b53eSSean Christopherson 
1238af95b53eSSean Christopherson 	return flush;
123914881998SBen Gardon }
124014881998SBen Gardon 
124114881998SBen Gardon /*
124214881998SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
124314881998SBen Gardon  * GFNs within the slot, across all TDP MMU roots.
124414881998SBen Gardon  */
1245142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
12468ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
12478ca6f063SBen Gardon 				       bool flush)
124814881998SBen Gardon {
124914881998SBen Gardon 	struct kvm_mmu_page *root;
125014881998SBen Gardon 
1251a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1252af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1253af95b53eSSean Christopherson 
1254142ccde1SSean Christopherson 	return flush;
125514881998SBen Gardon }
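
/*
 * Hypothetical caller sketch (not part of the upstream file): the @flush
 * parameter lets a caller chain this pass after a legacy/shadow MMU pass
 * and issue a single TLB flush at the end, roughly mirroring how mmu.c's
 * huge page recovery path consumes the return value. The function name is
 * illustrative only.
 */
static void example_recover_huge_pages(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	bool flush = false;

	write_lock(&kvm->mmu_lock);
	if (kvm->arch.tdp_mmu_enabled)
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
	if (flush)
		kvm_flush_remote_tlbs(kvm);
	write_unlock(&kvm->mmu_lock);
}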
125646044f72SBen Gardon 
125746044f72SBen Gardon /*
125846044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12595fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
126046044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
126146044f72SBen Gardon  */
126246044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
126346044f72SBen Gardon 			      gfn_t gfn)
126446044f72SBen Gardon {
126546044f72SBen Gardon 	struct tdp_iter iter;
126646044f72SBen Gardon 	u64 new_spte;
126746044f72SBen Gardon 	bool spte_set = false;
126846044f72SBen Gardon 
12697cca2d0bSBen Gardon 	rcu_read_lock();
12707cca2d0bSBen Gardon 
127146044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
127246044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
127346044f72SBen Gardon 			break;
127446044f72SBen Gardon 
127546044f72SBen Gardon 		new_spte = iter.old_spte &
12765fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
127746044f72SBen Gardon 
127846044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
127946044f72SBen Gardon 		spte_set = true;
128046044f72SBen Gardon 	}
128146044f72SBen Gardon 
12827cca2d0bSBen Gardon 	rcu_read_unlock();
12837cca2d0bSBen Gardon 
128446044f72SBen Gardon 	return spte_set;
128546044f72SBen Gardon }
128646044f72SBen Gardon 
128746044f72SBen Gardon /*
128846044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12895fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
129046044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
129146044f72SBen Gardon  */
129246044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
129346044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
129446044f72SBen Gardon {
129546044f72SBen Gardon 	struct kvm_mmu_page *root;
129646044f72SBen Gardon 	bool spte_set = false;
129746044f72SBen Gardon 
1298531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1299a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
130046044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
1301a3f15bdaSSean Christopherson 
130246044f72SBen Gardon 	return spte_set;
130346044f72SBen Gardon }
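
/*
 * Hypothetical usage sketch (not part of the upstream file): write-protect
 * a single tracked GFN and flush only that translation. Assumes the caller
 * is allowed to take mmu_lock for write; the function name is illustrative.
 */
static void example_write_protect_one_gfn(struct kvm *kvm,
					  struct kvm_memory_slot *slot,
					  gfn_t gfn)
{
	write_lock(&kvm->mmu_lock);
	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn))
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
	write_unlock(&kvm->mmu_lock);
}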
130446044f72SBen Gardon 
130595fb5b02SBen Gardon /*
130695fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
130795fb5b02SBen Gardon  * That SPTE may be non-present. Returns -1 if no SPTE was added.
130895fb5b02SBen Gardon  */
130939b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
131039b4d43eSSean Christopherson 			 int *root_level)
131195fb5b02SBen Gardon {
131295fb5b02SBen Gardon 	struct tdp_iter iter;
131395fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
131495fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
13152aa07893SSean Christopherson 	int leaf = -1;
131695fb5b02SBen Gardon 
131739b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
131895fb5b02SBen Gardon 
13197cca2d0bSBen Gardon 	rcu_read_lock();
13207cca2d0bSBen Gardon 
132195fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
132295fb5b02SBen Gardon 		leaf = iter.level;
1323dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
132495fb5b02SBen Gardon 	}
132595fb5b02SBen Gardon 
13267cca2d0bSBen Gardon 	rcu_read_unlock();
13277cca2d0bSBen Gardon 
132895fb5b02SBen Gardon 	return leaf;
132995fb5b02SBen Gardon }
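
/*
 * Hypothetical caller sketch (not part of the upstream file): walk the TDP
 * paging structure for a guest physical address and dump each SPTE from the
 * root level down to the returned leaf level, similar to how mmu.c consumes
 * this walk when probing for MMIO SPTEs. The function name is illustrative.
 */
static void example_dump_tdp_walk(struct kvm_vcpu *vcpu, u64 gpa)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	leaf = kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, &root_level);
	if (leaf < 0)
		return;	/* No SPTEs were visited for this address. */

	for (level = root_level; level >= leaf; level--)
		pr_debug("level %d: spte 0x%llx\n", level, sptes[level]);
}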
1330