xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision e1eed584)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
30fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
31fe5db27dSBen Gardon {
32fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
33fe5db27dSBen Gardon 		return;
3402c00b3aSBen Gardon 
3502c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
367cca2d0bSBen Gardon 
377cca2d0bSBen Gardon 	/*
387cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
397cca2d0bSBen Gardon 	 * can run before the VM is torn down.
407cca2d0bSBen Gardon 	 */
417cca2d0bSBen Gardon 	rcu_barrier();
4202c00b3aSBen Gardon }
4302c00b3aSBen Gardon 
44a889ea54SBen Gardon static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
45a889ea54SBen Gardon {
46a889ea54SBen Gardon 	if (kvm_mmu_put_root(kvm, root))
47a889ea54SBen Gardon 		kvm_tdp_mmu_free_root(kvm, root);
48a889ea54SBen Gardon }
49a889ea54SBen Gardon 
50a889ea54SBen Gardon static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
51a889ea54SBen Gardon 					   struct kvm_mmu_page *root)
52a889ea54SBen Gardon {
53531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
54a889ea54SBen Gardon 
55a889ea54SBen Gardon 	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
56a889ea54SBen Gardon 		return false;
57a889ea54SBen Gardon 
58a889ea54SBen Gardon 	kvm_mmu_get_root(kvm, root);
59a889ea54SBen Gardon 	return true;
61a889ea54SBen Gardon }
62a889ea54SBen Gardon 
63a889ea54SBen Gardon static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
64a889ea54SBen Gardon 						     struct kvm_mmu_page *root)
65a889ea54SBen Gardon {
66a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
67a889ea54SBen Gardon 
68a889ea54SBen Gardon 	next_root = list_next_entry(root, link);
69a889ea54SBen Gardon 	tdp_mmu_put_root(kvm, root);
70a889ea54SBen Gardon 	return next_root;
71a889ea54SBen Gardon }
72a889ea54SBen Gardon 
73a889ea54SBen Gardon /*
74a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
75a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
76a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
77a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
78a889ea54SBen Gardon  */
79a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
80a889ea54SBen Gardon 	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
81a889ea54SBen Gardon 				      typeof(*_root), link);		\
82a889ea54SBen Gardon 	     tdp_mmu_next_root_valid(_kvm, _root);			\
83a3f15bdaSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root))			\
84a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
85a3f15bdaSSean Christopherson 		} else
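
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that exits the yield-safe walk early must drop the reference the iterator
 * took on the current root, as described in the comment above:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) {
 *		if (done_with_roots(root)) {	// hypothetical early-exit predicate
 *			tdp_mmu_put_root(kvm, root);
 *			break;
 *		}
 *	}
 */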
86a889ea54SBen Gardon 
87a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
88a3f15bdaSSean Christopherson 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
89a3f15bdaSSean Christopherson 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
90a3f15bdaSSean Christopherson 		} else
9102c00b3aSBen Gardon 
92faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
93a835429cSSean Christopherson 			  gfn_t start, gfn_t end, bool can_yield, bool flush);
94faaf05b0SBen Gardon 
9502c00b3aSBen Gardon void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
9602c00b3aSBen Gardon {
97339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
98faaf05b0SBen Gardon 
99531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
10002c00b3aSBen Gardon 
10102c00b3aSBen Gardon 	WARN_ON(root->root_count);
10202c00b3aSBen Gardon 	WARN_ON(!root->tdp_mmu_page);
10302c00b3aSBen Gardon 
10402c00b3aSBen Gardon 	list_del(&root->link);
10502c00b3aSBen Gardon 
106a835429cSSean Christopherson 	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
107faaf05b0SBen Gardon 
10802c00b3aSBen Gardon 	free_page((unsigned long)root->spt);
10902c00b3aSBen Gardon 	kmem_cache_free(mmu_page_header_cache, root);
11002c00b3aSBen Gardon }
11102c00b3aSBen Gardon 
11202c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
11302c00b3aSBen Gardon 						   int level)
11402c00b3aSBen Gardon {
11502c00b3aSBen Gardon 	union kvm_mmu_page_role role;
11602c00b3aSBen Gardon 
11702c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
11802c00b3aSBen Gardon 	role.level = level;
11902c00b3aSBen Gardon 	role.direct = true;
12002c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
12102c00b3aSBen Gardon 	role.access = ACC_ALL;
12202c00b3aSBen Gardon 
12302c00b3aSBen Gardon 	return role;
12402c00b3aSBen Gardon }
12502c00b3aSBen Gardon 
12602c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
12702c00b3aSBen Gardon 					       int level)
12802c00b3aSBen Gardon {
12902c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
13002c00b3aSBen Gardon 
13102c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
13202c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
13302c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
13402c00b3aSBen Gardon 
13502c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
13602c00b3aSBen Gardon 	sp->gfn = gfn;
13702c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
13802c00b3aSBen Gardon 
13933dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
14033dd3574SBen Gardon 
14102c00b3aSBen Gardon 	return sp;
14202c00b3aSBen Gardon }
14302c00b3aSBen Gardon 
1446e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
14502c00b3aSBen Gardon {
14602c00b3aSBen Gardon 	union kvm_mmu_page_role role;
14702c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
14802c00b3aSBen Gardon 	struct kvm_mmu_page *root;
14902c00b3aSBen Gardon 
1506e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
15102c00b3aSBen Gardon 
1526e6ec584SSean Christopherson 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
15302c00b3aSBen Gardon 
15402c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
155a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
15602c00b3aSBen Gardon 		if (root->role.word == role.word) {
15702c00b3aSBen Gardon 			kvm_mmu_get_root(kvm, root);
1586e6ec584SSean Christopherson 			goto out;
15902c00b3aSBen Gardon 		}
16002c00b3aSBen Gardon 	}
16102c00b3aSBen Gardon 
16202c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
16302c00b3aSBen Gardon 	root->root_count = 1;
16402c00b3aSBen Gardon 
16502c00b3aSBen Gardon 	list_add(&root->link, &kvm->arch.tdp_mmu_roots);
16602c00b3aSBen Gardon 
1676e6ec584SSean Christopherson out:
16802c00b3aSBen Gardon 	return __pa(root->spt);
169fe5db27dSBen Gardon }
1702f2fad08SBen Gardon 
1717cca2d0bSBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
1727cca2d0bSBen Gardon {
1737cca2d0bSBen Gardon 	free_page((unsigned long)sp->spt);
1747cca2d0bSBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
1757cca2d0bSBen Gardon }
1767cca2d0bSBen Gardon 
1777cca2d0bSBen Gardon /*
1787cca2d0bSBen Gardon  * This is called through call_rcu in order to free TDP page table memory
1797cca2d0bSBen Gardon  * safely with respect to other kernel threads that may be operating on
1807cca2d0bSBen Gardon  * the memory.
1817cca2d0bSBen Gardon  * Because TDP MMU page table memory is only accessed inside RCU read-side
1827cca2d0bSBen Gardon  * critical sections, and is only freed after an RCU grace period, lockless
1837cca2d0bSBen Gardon  * walkers can never use the memory after it has been freed.
1847cca2d0bSBen Gardon  */
1857cca2d0bSBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
1867cca2d0bSBen Gardon {
1877cca2d0bSBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
1887cca2d0bSBen Gardon 					       rcu_head);
1897cca2d0bSBen Gardon 
1907cca2d0bSBen Gardon 	tdp_mmu_free_sp(sp);
1917cca2d0bSBen Gardon }
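
/*
 * Illustrative sketch (not part of the original file) of the RCU lifecycle the
 * helpers above rely on: lockless walkers only touch TDP MMU page table memory
 * inside an RCU read-side critical section, while the zapping side frees a
 * detached page table only after a grace period:
 *
 *	// lockless walker (e.g. a page fault handler)
 *	rcu_read_lock();
 *	// ... walk SPTEs and dereference child page tables ...
 *	rcu_read_unlock();
 *
 *	// zapping side, once the page table is unreachable
 *	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
 */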
1927cca2d0bSBen Gardon 
1932f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1949a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
1959a77daacSBen Gardon 				bool shared);
1962f2fad08SBen Gardon 
197f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
198f8e14497SBen Gardon {
199f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
200f8e14497SBen Gardon 		return;
201f8e14497SBen Gardon 
202f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
20364bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
20464bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
205f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
206f8e14497SBen Gardon }
207f8e14497SBen Gardon 
208a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
209a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
210a6a0b05dSBen Gardon {
211a6a0b05dSBen Gardon 	bool pfn_changed;
212a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
213a6a0b05dSBen Gardon 
214a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
215a6a0b05dSBen Gardon 		return;
216a6a0b05dSBen Gardon 
217a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
218a6a0b05dSBen Gardon 
219a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
220a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
221a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
222fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
223a6a0b05dSBen Gardon 	}
224a6a0b05dSBen Gardon }
225a6a0b05dSBen Gardon 
2262f2fad08SBen Gardon /**
227a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
228a9442f59SBen Gardon  *
229a9442f59SBen Gardon  * @kvm: kvm instance
230a9442f59SBen Gardon  * @sp: the new page
2319a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2329a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2339a77daacSBen Gardon  *	    threads that might be adding or removing pages.
234a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
235a9442f59SBen Gardon  *		eventual reclaim.
236a9442f59SBen Gardon  */
237a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2389a77daacSBen Gardon 			      bool shared, bool account_nx)
239a9442f59SBen Gardon {
2409a77daacSBen Gardon 	if (shared)
2419a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2429a77daacSBen Gardon 	else
243a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
244a9442f59SBen Gardon 
245a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
246a9442f59SBen Gardon 	if (account_nx)
247a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2489a77daacSBen Gardon 
2499a77daacSBen Gardon 	if (shared)
2509a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
251a9442f59SBen Gardon }
252a9442f59SBen Gardon 
253a9442f59SBen Gardon /**
254a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
255a9442f59SBen Gardon  *
256a9442f59SBen Gardon  * @kvm: kvm instance
257a9442f59SBen Gardon  * @sp: the page to be removed
2589a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2599a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2609a77daacSBen Gardon  *	    threads that might be adding or removing pages.
261a9442f59SBen Gardon  */
2629a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2639a77daacSBen Gardon 				bool shared)
264a9442f59SBen Gardon {
2659a77daacSBen Gardon 	if (shared)
2669a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2679a77daacSBen Gardon 	else
268a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
269a9442f59SBen Gardon 
270a9442f59SBen Gardon 	list_del(&sp->link);
271a9442f59SBen Gardon 	if (sp->lpage_disallowed)
272a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2739a77daacSBen Gardon 
2749a77daacSBen Gardon 	if (shared)
2759a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
276a9442f59SBen Gardon }
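
/*
 * Illustrative sketch (not part of the original file): the @shared argument of
 * the link/unlink helpers above mirrors how the caller holds the MMU lock:
 *
 *	// mmu_lock held for read: serialize against other readers with the
 *	// tdp_mmu_pages_lock spinlock
 *	tdp_mmu_link_page(kvm, sp, true, account_nx);
 *
 *	// mmu_lock held for write: exclusive access, no spinlock needed
 *	tdp_mmu_link_page(kvm, sp, false, account_nx);
 */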
277a9442f59SBen Gardon 
278a9442f59SBen Gardon /**
279a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
280a066e61fSBen Gardon  *
281a066e61fSBen Gardon  * @kvm: kvm instance
282a066e61fSBen Gardon  * @pt: the page removed from the paging structure
2839a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
2849a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
2859a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
286a066e61fSBen Gardon  *
287a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
288a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
28970fb3e41SBen Gardon  *
29070fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
29170fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
29270fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
29370fb3e41SBen Gardon  * early rcu_dereferences in the function.
294a066e61fSBen Gardon  */
29570fb3e41SBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
2969a77daacSBen Gardon 					bool shared)
297a066e61fSBen Gardon {
29870fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
299a066e61fSBen Gardon 	int level = sp->role.level;
300e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
301a066e61fSBen Gardon 	u64 old_child_spte;
3029a77daacSBen Gardon 	u64 *sptep;
303e25f0e0cSBen Gardon 	gfn_t gfn;
304a066e61fSBen Gardon 	int i;
305a066e61fSBen Gardon 
306a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
307a066e61fSBen Gardon 
3089a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
309a066e61fSBen Gardon 
310a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
31170fb3e41SBen Gardon 		sptep = rcu_dereference(pt) + i;
312e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3139a77daacSBen Gardon 
3149a77daacSBen Gardon 		if (shared) {
315e25f0e0cSBen Gardon 			/*
316e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
317e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
318e25f0e0cSBen Gardon 			 * already marked as removed then another thread
319e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
320e25f0e0cSBen Gardon 			 * retry the exchange until the SPTE is successfully
321e25f0e0cSBen Gardon 			 * set from some non-removed value to REMOVED_SPTE.
322e25f0e0cSBen Gardon 			 */
323e25f0e0cSBen Gardon 			for (;;) {
324e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
325e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
326e25f0e0cSBen Gardon 					break;
327e25f0e0cSBen Gardon 				cpu_relax();
328e25f0e0cSBen Gardon 			}
3299a77daacSBen Gardon 		} else {
3308df9f1afSSean Christopherson 			/*
3318df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3328df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3338df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3348df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3358df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3368df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3378df9f1afSSean Christopherson 			 * unreachable.
3388df9f1afSSean Christopherson 			 */
3399a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3408df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3418df9f1afSSean Christopherson 				continue;
342e25f0e0cSBen Gardon 
343e25f0e0cSBen Gardon 			/*
344e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
345e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
346e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
347e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
348e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
349e25f0e0cSBen Gardon 			 * the function.
350e25f0e0cSBen Gardon 			 */
351e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3529a77daacSBen Gardon 		}
353e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
354e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
355e25f0e0cSBen Gardon 				    shared);
356a066e61fSBen Gardon 	}
357a066e61fSBen Gardon 
358a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
359a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
360a066e61fSBen Gardon 
3617cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
362a066e61fSBen Gardon }
363a066e61fSBen Gardon 
364a066e61fSBen Gardon /**
3652f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3662f2fad08SBen Gardon  * @kvm: kvm instance
3672f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3682f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3692f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3702f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3712f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3729a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3739a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3749a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3752f2fad08SBen Gardon  *
3762f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3772f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3782f2fad08SBen Gardon  */
3792f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3809a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3819a77daacSBen Gardon 				  bool shared)
3822f2fad08SBen Gardon {
3832f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3842f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
3852f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
3862f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
3872f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
3882f2fad08SBen Gardon 
3892f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
3902f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
391764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
3922f2fad08SBen Gardon 
3932f2fad08SBen Gardon 	/*
3942f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
3952f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
3962f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
3972f2fad08SBen Gardon 	 * present leaf SPTE pointing to a different PFN. A notifier handler
3982f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
3992f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4002f2fad08SBen Gardon 	 * thread before replacement.
4012f2fad08SBen Gardon 	 */
4022f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4032f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4042f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4052f2fad08SBen Gardon 		       "different PFN!\n"
4062f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4072f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4082f2fad08SBen Gardon 
4092f2fad08SBen Gardon 		/*
4102f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4112f2fad08SBen Gardon 		 * corruption.
4122f2fad08SBen Gardon 		 */
4132f2fad08SBen Gardon 		BUG();
4142f2fad08SBen Gardon 	}
4152f2fad08SBen Gardon 
4162f2fad08SBen Gardon 	if (old_spte == new_spte)
4172f2fad08SBen Gardon 		return;
4182f2fad08SBen Gardon 
419b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
420b9a98c34SBen Gardon 
4212f2fad08SBen Gardon 	/*
4222f2fad08SBen Gardon 	 * The only time an SPTE should be changed from a non-present to a
4232f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed, modified, or
4242f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4252f2fad08SBen Gardon 	 */
4262f2fad08SBen Gardon 	if (!was_present && !is_present) {
4272f2fad08SBen Gardon 		/*
42808f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
42908f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
43008f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
43108f07c80SBen Gardon 		 * are nonpresent.
4322f2fad08SBen Gardon 		 */
43308f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
43408f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
43508f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4362f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4372f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4382f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
43908f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
44008f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4412f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4422f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4432f2fad08SBen Gardon 		return;
4442f2fad08SBen Gardon 	}
4452f2fad08SBen Gardon 
4472f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
44864bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4492f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4502f2fad08SBen Gardon 
4512f2fad08SBen Gardon 	/*
4522f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4532f2fad08SBen Gardon 	 * the paging structure.
4542f2fad08SBen Gardon 	 */
455a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
456a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4579a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4582f2fad08SBen Gardon }
4592f2fad08SBen Gardon 
4602f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4619a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4629a77daacSBen Gardon 				bool shared)
4632f2fad08SBen Gardon {
4649a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4659a77daacSBen Gardon 			      shared);
466f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
467a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
468a6a0b05dSBen Gardon 				      new_spte, level);
4692f2fad08SBen Gardon }
470faaf05b0SBen Gardon 
471fe43fa2fSBen Gardon /*
4729a77daacSBen Gardon  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
4739a77daacSBen Gardon  * associated bookkeeping
4749a77daacSBen Gardon  *
4759a77daacSBen Gardon  * @kvm: kvm instance
4769a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
4779a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
4789a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
4799a77daacSBen Gardon  *	    this function will have no side-effects.
4809a77daacSBen Gardon  */
4819a77daacSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
4829a77daacSBen Gardon 					   struct tdp_iter *iter,
4839a77daacSBen Gardon 					   u64 new_spte)
4849a77daacSBen Gardon {
4859a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
4869a77daacSBen Gardon 
48708f07c80SBen Gardon 	/*
48808f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
48908f07c80SBen Gardon 	 * may modify it.
49008f07c80SBen Gardon 	 */
4917a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
49208f07c80SBen Gardon 		return false;
49308f07c80SBen Gardon 
4949a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
4959a77daacSBen Gardon 		      new_spte) != iter->old_spte)
4969a77daacSBen Gardon 		return false;
4979a77daacSBen Gardon 
49808889894SSean Christopherson 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
49908889894SSean Christopherson 			    new_spte, iter->level, true);
5009a77daacSBen Gardon 
5019a77daacSBen Gardon 	return true;
5029a77daacSBen Gardon }
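
/*
 * Illustrative sketch (not part of the original file): tdp_mmu_set_spte_atomic()
 * can lose the cmpxchg race, so callers running with the MMU lock held for
 * read must be prepared to bail out and retry, e.g. in a fault handler:
 *
 *	if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *		return RET_PF_RETRY;	// another thread changed the SPTE first
 */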
5039a77daacSBen Gardon 
50408f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
50508f07c80SBen Gardon 					   struct tdp_iter *iter)
50608f07c80SBen Gardon {
50708f07c80SBen Gardon 	/*
50808f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
50908f07c80SBen Gardon 	 * non-present value. This will stop other threads from
51008f07c80SBen Gardon 	 * immediately installing a present entry in its place
51108f07c80SBen Gardon 	 * before the TLBs are flushed.
51208f07c80SBen Gardon 	 */
51308f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
51408f07c80SBen Gardon 		return false;
51508f07c80SBen Gardon 
51608f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
51708f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
51808f07c80SBen Gardon 
51908f07c80SBen Gardon 	/*
52008f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
52108f07c80SBen Gardon 	 * must either wait on the MMU lock or use
52208f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overwrite the
52308f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
52408f07c80SBen Gardon 	 * here since the SPTE is going from non-present
52508f07c80SBen Gardon 	 * to non-present.
52608f07c80SBen Gardon 	 */
52714f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
52808f07c80SBen Gardon 
52908f07c80SBen Gardon 	return true;
53008f07c80SBen Gardon }
53108f07c80SBen Gardon 
5339a77daacSBen Gardon /*
534fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
535fe43fa2fSBen Gardon  * @kvm: kvm instance
536fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
537fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
538fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
539fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
540fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
541fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
542fe43fa2fSBen Gardon  *		      double counted.
543fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
544fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
545fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
546fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
547fe43fa2fSBen Gardon  *		      writes from being double counted.
548fe43fa2fSBen Gardon  */
549f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
550a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
551a6a0b05dSBen Gardon 				      bool record_dirty_log)
552faaf05b0SBen Gardon {
553531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5543a9a4aa5SBen Gardon 
55508f07c80SBen Gardon 	/*
55608f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
55708f07c80SBen Gardon 	 * temporary removed SPTE value.
55808f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
55908f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
56008f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
56108f07c80SBen Gardon 	 */
5627a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
56308f07c80SBen Gardon 
5647cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
565faaf05b0SBen Gardon 
56608889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
56708889894SSean Christopherson 			      new_spte, iter->level, false);
568f8e14497SBen Gardon 	if (record_acc_track)
569f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
570f8e14497SBen Gardon 					      iter->level);
571a6a0b05dSBen Gardon 	if (record_dirty_log)
57208889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
573a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
574a6a0b05dSBen Gardon 					      iter->level);
575f8e14497SBen Gardon }
576f8e14497SBen Gardon 
577f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
578f8e14497SBen Gardon 				    u64 new_spte)
579f8e14497SBen Gardon {
580a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
581f8e14497SBen Gardon }
582f8e14497SBen Gardon 
583f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
584f8e14497SBen Gardon 						 struct tdp_iter *iter,
585f8e14497SBen Gardon 						 u64 new_spte)
586f8e14497SBen Gardon {
587a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
588a6a0b05dSBen Gardon }
589a6a0b05dSBen Gardon 
590a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
591a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
592a6a0b05dSBen Gardon 						 u64 new_spte)
593a6a0b05dSBen Gardon {
594a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
595faaf05b0SBen Gardon }
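
/*
 * Illustrative sketch (not part of the original file) of how the wrappers
 * above map onto the record_acc_track/record_dirty_log arguments of
 * __tdp_mmu_set_spte():
 *
 *	tdp_mmu_set_spte(kvm, &iter, new_spte);		     // (true, true): general use
 *	tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte); // (false, true): access-tracking notifiers
 *	tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); // (true, false): dirty logging operations
 */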
596faaf05b0SBen Gardon 
597faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
598faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
599faaf05b0SBen Gardon 
600f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
601f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
602f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
603f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
604f8e14497SBen Gardon 			continue;					\
605f8e14497SBen Gardon 		else
606f8e14497SBen Gardon 
607bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
608bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
609bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
610bb18842eSBen Gardon 
611faaf05b0SBen Gardon /*
612e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
613e28a436cSBen Gardon  * to the scheduler.
614e28a436cSBen Gardon  *
615e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
616e139a34eSBen Gardon  * TLB flush before yielding.
617e139a34eSBen Gardon  *
618e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
619ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
620ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
621ed5e484bSBen Gardon  * paging structure root.
622e28a436cSBen Gardon  *
623e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
624e28a436cSBen Gardon  * Return false if a yield was not needed.
625e28a436cSBen Gardon  */
626e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
627e139a34eSBen Gardon 					     struct tdp_iter *iter, bool flush)
628a6a0b05dSBen Gardon {
629ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
630ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
631ed5e484bSBen Gardon 		return false;
632ed5e484bSBen Gardon 
633531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6347cca2d0bSBen Gardon 		rcu_read_unlock();
6357cca2d0bSBen Gardon 
636e139a34eSBen Gardon 		if (flush)
637e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
638e139a34eSBen Gardon 
639531810caSBen Gardon 		cond_resched_rwlock_write(&kvm->mmu_lock);
6407cca2d0bSBen Gardon 		rcu_read_lock();
641ed5e484bSBen Gardon 
642ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
643ed5e484bSBen Gardon 
644b601c3bcSBen Gardon 		tdp_iter_restart(iter);
645ed5e484bSBen Gardon 
646e28a436cSBen Gardon 		return true;
647a6a0b05dSBen Gardon 	}
648e28a436cSBen Gardon 
649e28a436cSBen Gardon 	return false;
650a6a0b05dSBen Gardon }
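
/*
 * Illustrative sketch (not part of the original file): long-running walks in
 * this file wrap tdp_mmu_iter_cond_resched() in the same pattern; when it
 * yields it restarts the iterator, so the caller must "continue" instead of
 * acting on the (now reset) iterator position:
 *
 *	tdp_root_for_each_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
 *			continue;
 *		// ... process iter.old_spte ...
 *	}
 */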
651a6a0b05dSBen Gardon 
652faaf05b0SBen Gardon /*
653faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
654faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
655faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
656faaf05b0SBen Gardon  * MMU lock.
657063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
658063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
659063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
660063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
661a835429cSSean Christopherson  * operation can cause a soft lockup.  Note, in some use cases a flush may be
662a835429cSSean Christopherson  * required by prior actions.  Ensure the pending flush is performed prior to
663a835429cSSean Christopherson  * yielding.
664faaf05b0SBen Gardon  */
665faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
666a835429cSSean Christopherson 			  gfn_t start, gfn_t end, bool can_yield, bool flush)
667faaf05b0SBen Gardon {
668faaf05b0SBen Gardon 	struct tdp_iter iter;
669faaf05b0SBen Gardon 
6707cca2d0bSBen Gardon 	rcu_read_lock();
6717cca2d0bSBen Gardon 
672faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
6731af4a960SBen Gardon 		if (can_yield &&
674a835429cSSean Christopherson 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
675a835429cSSean Christopherson 			flush = false;
6761af4a960SBen Gardon 			continue;
6771af4a960SBen Gardon 		}
6781af4a960SBen Gardon 
679faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
680faaf05b0SBen Gardon 			continue;
681faaf05b0SBen Gardon 
682faaf05b0SBen Gardon 		/*
683faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
684faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
685faaf05b0SBen Gardon 		 * lower level.
686faaf05b0SBen Gardon 		 */
687faaf05b0SBen Gardon 		if ((iter.gfn < start ||
688faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
689faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
690faaf05b0SBen Gardon 			continue;
691faaf05b0SBen Gardon 
692faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
693a835429cSSean Christopherson 		flush = true;
694faaf05b0SBen Gardon 	}
6957cca2d0bSBen Gardon 
6967cca2d0bSBen Gardon 	rcu_read_unlock();
697a835429cSSean Christopherson 	return flush;
698faaf05b0SBen Gardon }
699faaf05b0SBen Gardon 
700faaf05b0SBen Gardon /*
701faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
702faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
703faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
704faaf05b0SBen Gardon  * MMU lock.
705faaf05b0SBen Gardon  */
7062b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
7072b9663d8SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
708faaf05b0SBen Gardon {
709faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
710faaf05b0SBen Gardon 
711a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
71233a31641SSean Christopherson 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
713faaf05b0SBen Gardon 
714faaf05b0SBen Gardon 	return flush;
715faaf05b0SBen Gardon }
716faaf05b0SBen Gardon 
717faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
718faaf05b0SBen Gardon {
719339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
7202b9663d8SSean Christopherson 	bool flush = false;
7212b9663d8SSean Christopherson 	int i;
722faaf05b0SBen Gardon 
7232b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
7242b9663d8SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
7252b9663d8SSean Christopherson 
726faaf05b0SBen Gardon 	if (flush)
727faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
728faaf05b0SBen Gardon }
729bb18842eSBen Gardon 
730bb18842eSBen Gardon /*
731bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
732bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
733bb18842eSBen Gardon  */
734bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
735bb18842eSBen Gardon 					  int map_writable,
736bb18842eSBen Gardon 					  struct tdp_iter *iter,
737bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
738bb18842eSBen Gardon {
739bb18842eSBen Gardon 	u64 new_spte;
740bb18842eSBen Gardon 	int ret = 0;
741bb18842eSBen Gardon 	int make_spte_ret = 0;
742bb18842eSBen Gardon 
7439a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
744bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
7459a77daacSBen Gardon 	else
746bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
747bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
748bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
749bb18842eSBen Gardon 					 &new_spte);
750bb18842eSBen Gardon 
751bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
752bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
7539a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
7549a77daacSBen Gardon 		return RET_PF_RETRY;
755bb18842eSBen Gardon 
756bb18842eSBen Gardon 	/*
757bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
758bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
759bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
760bb18842eSBen Gardon 	 */
761bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
762bb18842eSBen Gardon 		if (write)
763bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
764bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
765bb18842eSBen Gardon 	}
766bb18842eSBen Gardon 
767bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
7689a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
7699a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
7709a77daacSBen Gardon 				     new_spte);
771bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
7723849e092SSean Christopherson 	} else {
7739a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
7749a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
7753849e092SSean Christopherson 	}
776bb18842eSBen Gardon 
777bb18842eSBen Gardon 	if (!prefault)
778bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
779bb18842eSBen Gardon 
780bb18842eSBen Gardon 	return ret;
781bb18842eSBen Gardon }
782bb18842eSBen Gardon 
783bb18842eSBen Gardon /*
784bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
785bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
786bb18842eSBen Gardon  */
787bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
788bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
789bb18842eSBen Gardon 		    bool prefault)
790bb18842eSBen Gardon {
791bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
792bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
793bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
794bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
795bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
796bb18842eSBen Gardon 	struct tdp_iter iter;
79789c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
798bb18842eSBen Gardon 	u64 *child_pt;
799bb18842eSBen Gardon 	u64 new_spte;
800bb18842eSBen Gardon 	int ret;
801bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
802bb18842eSBen Gardon 	int level;
803bb18842eSBen Gardon 	int req_level;
804bb18842eSBen Gardon 
805bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
806bb18842eSBen Gardon 		return RET_PF_RETRY;
807bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
808bb18842eSBen Gardon 		return RET_PF_RETRY;
809bb18842eSBen Gardon 
810bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
811bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
812bb18842eSBen Gardon 
813bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
8147cca2d0bSBen Gardon 
8157cca2d0bSBen Gardon 	rcu_read_lock();
8167cca2d0bSBen Gardon 
817bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
818bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
819bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
820bb18842eSBen Gardon 						   iter.level, &pfn, &level);
821bb18842eSBen Gardon 
822bb18842eSBen Gardon 		if (iter.level == level)
823bb18842eSBen Gardon 			break;
824bb18842eSBen Gardon 
825bb18842eSBen Gardon 		/*
826bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
827bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
828bb18842eSBen Gardon 		 * with a non-leaf SPTE.
829bb18842eSBen Gardon 		 */
830bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
831bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
83208f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
8339a77daacSBen Gardon 				break;
834bb18842eSBen Gardon 
835bb18842eSBen Gardon 			/*
836bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
837bb18842eSBen Gardon 			 * because the new value informs the !present
838bb18842eSBen Gardon 			 * path below.
839bb18842eSBen Gardon 			 */
8407cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
841bb18842eSBen Gardon 		}
842bb18842eSBen Gardon 
843bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
84489c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
84589c0fd49SBen Gardon 			child_pt = sp->spt;
846a9442f59SBen Gardon 
847bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
848bb18842eSBen Gardon 						     !shadow_accessed_mask);
849bb18842eSBen Gardon 
8509a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
8519a77daacSBen Gardon 						    new_spte)) {
8529a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
8539a77daacSBen Gardon 						  huge_page_disallowed &&
8549a77daacSBen Gardon 						  req_level >= iter.level);
8559a77daacSBen Gardon 
856bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
8579a77daacSBen Gardon 			} else {
8589a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
8599a77daacSBen Gardon 				break;
8609a77daacSBen Gardon 			}
861bb18842eSBen Gardon 		}
862bb18842eSBen Gardon 	}
863bb18842eSBen Gardon 
8649a77daacSBen Gardon 	if (iter.level != level) {
8657cca2d0bSBen Gardon 		rcu_read_unlock();
866bb18842eSBen Gardon 		return RET_PF_RETRY;
8677cca2d0bSBen Gardon 	}
868bb18842eSBen Gardon 
869bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
870bb18842eSBen Gardon 					      pfn, prefault);
8717cca2d0bSBen Gardon 	rcu_read_unlock();
872bb18842eSBen Gardon 
873bb18842eSBen Gardon 	return ret;
874bb18842eSBen Gardon }
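
/*
 * Illustrative sketch (not part of the original file): the TDP fault path is
 * expected to invoke this roughly as follows, with the MMU lock held for read
 * (the surrounding locals are assumptions):
 *
 *	read_lock(&vcpu->kvm->mmu_lock);
 *	r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
 *			    pfn, prefault);
 *	read_unlock(&vcpu->kvm->mmu_lock);
 */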
875063afacdSBen Gardon 
8763039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
8773039bcc7SSean Christopherson 				 bool flush)
8783039bcc7SSean Christopherson {
8793039bcc7SSean Christopherson 	struct kvm_mmu_page *root;
880c1b91493SSean Christopherson 
8813039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
8823039bcc7SSean Christopherson 		flush |= zap_gfn_range(kvm, root, range->start, range->end,
883*e1eed584SSean Christopherson 				       range->may_block, flush);
8843039bcc7SSean Christopherson 
8853039bcc7SSean Christopherson 	return flush;
8863039bcc7SSean Christopherson }
8873039bcc7SSean Christopherson 
8883039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
8893039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
8903039bcc7SSean Christopherson 
8913039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
8923039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
893c1b91493SSean Christopherson 						   tdp_handler_t handler)
894063afacdSBen Gardon {
895063afacdSBen Gardon 	struct kvm_mmu_page *root;
8963039bcc7SSean Christopherson 	struct tdp_iter iter;
8973039bcc7SSean Christopherson 	bool ret = false;
898063afacdSBen Gardon 
8993039bcc7SSean Christopherson 	rcu_read_lock();
900063afacdSBen Gardon 
901*e1eed584SSean Christopherson 	/*
902*e1eed584SSean Christopherson 	 * Don't support rescheduling, none of the MMU notifiers that funnel
903*e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
904*e1eed584SSean Christopherson 	 */
9053039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
9063039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
9073039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
9083039bcc7SSean Christopherson 	}
909063afacdSBen Gardon 
9103039bcc7SSean Christopherson 	rcu_read_unlock();
911063afacdSBen Gardon 
912063afacdSBen Gardon 	return ret;
913063afacdSBen Gardon }
914063afacdSBen Gardon 
915f8e14497SBen Gardon /*
916f8e14497SBen Gardon  * Mark the SPTEs mapping the range of GFNs [start, end) unaccessed and
917f8e14497SBen Gardon  * return non-zero if any of the GFNs in the range have been accessed.
918f8e14497SBen Gardon  */
9193039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
9203039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
921f8e14497SBen Gardon {
9223039bcc7SSean Christopherson 	u64 new_spte = 0;
923f8e14497SBen Gardon 
9243039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
9253039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
9263039bcc7SSean Christopherson 		return false;
9277cca2d0bSBen Gardon 
9283039bcc7SSean Christopherson 	new_spte = iter->old_spte;
929f8e14497SBen Gardon 
930f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
9318f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
932f8e14497SBen Gardon 	} else {
933f8e14497SBen Gardon 		/*
934f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
935f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
936f8e14497SBen Gardon 		 */
937f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
938f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
939f8e14497SBen Gardon 
940f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
941f8e14497SBen Gardon 	}
942f8e14497SBen Gardon 
9433039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
9443039bcc7SSean Christopherson 
9453039bcc7SSean Christopherson 	return true;
946f8e14497SBen Gardon }
947f8e14497SBen Gardon 
9483039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
949f8e14497SBen Gardon {
9503039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
951f8e14497SBen Gardon }
952f8e14497SBen Gardon 
9533039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
9543039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
955f8e14497SBen Gardon {
9563039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
957f8e14497SBen Gardon }
958f8e14497SBen Gardon 
9593039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
960f8e14497SBen Gardon {
9613039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
9623039bcc7SSean Christopherson }
9633039bcc7SSean Christopherson 
9643039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
9653039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
9663039bcc7SSean Christopherson {
9673039bcc7SSean Christopherson 	u64 new_spte;
9683039bcc7SSean Christopherson 
9693039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
9703039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
9713039bcc7SSean Christopherson 
9723039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
9733039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
9743039bcc7SSean Christopherson 		return false;
9753039bcc7SSean Christopherson 
9763039bcc7SSean Christopherson 	/*
9773039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
9783039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
9793039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
9803039bcc7SSean Christopherson 	 * See __handle_changed_spte().
9813039bcc7SSean Christopherson 	 */
9823039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
9833039bcc7SSean Christopherson 
9843039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
9853039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
9863039bcc7SSean Christopherson 								  pte_pfn(range->pte));
9873039bcc7SSean Christopherson 
9883039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
9893039bcc7SSean Christopherson 	}
9903039bcc7SSean Christopherson 
9913039bcc7SSean Christopherson 	return true;
992f8e14497SBen Gardon }
9931d8dd6b3SBen Gardon 
9941d8dd6b3SBen Gardon /*
9951d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
9961d8dd6b3SBen Gardon  * range->pte contains the new pte_t mapping the HVA specified by the MMU
9971d8dd6b3SBen Gardon  * notifier.
9981d8dd6b3SBen Gardon  * Returns non-zero if a flush is needed before releasing the MMU lock.
9991d8dd6b3SBen Gardon  */
10003039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
10011d8dd6b3SBen Gardon {
10023039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
10031d8dd6b3SBen Gardon 
10043039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
10053039bcc7SSean Christopherson 	if (flush)
10063039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
10077cca2d0bSBen Gardon 
10083039bcc7SSean Christopherson 	return false;
10091d8dd6b3SBen Gardon }
10101d8dd6b3SBen Gardon 
1011a6a0b05dSBen Gardon /*
1012a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs [start, end). Only
1013a6a0b05dSBen Gardon  * leaf SPTEs at or above min_level are write-protected.
1014a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1015a6a0b05dSBen Gardon  */
1016a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1017a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1018a6a0b05dSBen Gardon {
1019a6a0b05dSBen Gardon 	struct tdp_iter iter;
1020a6a0b05dSBen Gardon 	u64 new_spte;
1021a6a0b05dSBen Gardon 	bool spte_set = false;
1022a6a0b05dSBen Gardon 
10237cca2d0bSBen Gardon 	rcu_read_lock();
10247cca2d0bSBen Gardon 
1025a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1026a6a0b05dSBen Gardon 
1027a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1028a6a0b05dSBen Gardon 				   min_level, start, end) {
10291af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10301af4a960SBen Gardon 			continue;
10311af4a960SBen Gardon 
1032a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
10330f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
10340f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1035a6a0b05dSBen Gardon 			continue;
1036a6a0b05dSBen Gardon 
1037a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1038a6a0b05dSBen Gardon 
1039a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1040a6a0b05dSBen Gardon 		spte_set = true;
1041a6a0b05dSBen Gardon 	}
10427cca2d0bSBen Gardon 
10437cca2d0bSBen Gardon 	rcu_read_unlock();
1044a6a0b05dSBen Gardon 	return spte_set;
1045a6a0b05dSBen Gardon }
1046a6a0b05dSBen Gardon 
1047a6a0b05dSBen Gardon /*
1048a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1049a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1050a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1051a6a0b05dSBen Gardon  */
1052a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1053a6a0b05dSBen Gardon 			     int min_level)
1054a6a0b05dSBen Gardon {
1055a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1056a6a0b05dSBen Gardon 	bool spte_set = false;
1057a6a0b05dSBen Gardon 
1058a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1059a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1060a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1061a6a0b05dSBen Gardon 
1062a6a0b05dSBen Gardon 	return spte_set;
1063a6a0b05dSBen Gardon }
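
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * write-protecting every leaf SPTE in a slot, e.g. when write access must
 * be removed slot-wide, with mmu_lock held for write:
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */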
1064a6a0b05dSBen Gardon 
1065a6a0b05dSBen Gardon /*
1066a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs [start, end). If
1067a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1068a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1069a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1070a6a0b05dSBen Gardon  * be flushed.
1071a6a0b05dSBen Gardon  */
1072a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1073a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1074a6a0b05dSBen Gardon {
1075a6a0b05dSBen Gardon 	struct tdp_iter iter;
1076a6a0b05dSBen Gardon 	u64 new_spte;
1077a6a0b05dSBen Gardon 	bool spte_set = false;
1078a6a0b05dSBen Gardon 
10797cca2d0bSBen Gardon 	rcu_read_lock();
10807cca2d0bSBen Gardon 
1081a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
10821af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10831af4a960SBen Gardon 			continue;
10841af4a960SBen Gardon 
1085a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1086a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1087a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1088a6a0b05dSBen Gardon 			else
1089a6a0b05dSBen Gardon 				continue;
1090a6a0b05dSBen Gardon 		} else {
1091a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1092a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1093a6a0b05dSBen Gardon 			else
1094a6a0b05dSBen Gardon 				continue;
1095a6a0b05dSBen Gardon 		}
1096a6a0b05dSBen Gardon 
1097a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1098a6a0b05dSBen Gardon 		spte_set = true;
1099a6a0b05dSBen Gardon 	}
11007cca2d0bSBen Gardon 
11017cca2d0bSBen Gardon 	rcu_read_unlock();
1102a6a0b05dSBen Gardon 	return spte_set;
1103a6a0b05dSBen Gardon }
1104a6a0b05dSBen Gardon 
1105a6a0b05dSBen Gardon /*
1106a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1107a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1108a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1109a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1110a6a0b05dSBen Gardon  * be flushed.
1111a6a0b05dSBen Gardon  */
1112a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1113a6a0b05dSBen Gardon {
1114a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1115a6a0b05dSBen Gardon 	bool spte_set = false;
1116a6a0b05dSBen Gardon 
1117a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1118a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1119a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1120a6a0b05dSBen Gardon 
1121a6a0b05dSBen Gardon 	return spte_set;
1122a6a0b05dSBen Gardon }
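
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * re-arming dirty tracking for a whole slot, e.g. after its dirty log has
 * been harvested; the TLB flush is only needed if an SPTE actually changed:
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_clear_dirty_slot(kvm, slot))
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */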
1123a6a0b05dSBen Gardon 
1124a6a0b05dSBen Gardon /*
1125a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1126a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1127a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1128a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1129a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1130a6a0b05dSBen Gardon  */
1131a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1132a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1133a6a0b05dSBen Gardon {
1134a6a0b05dSBen Gardon 	struct tdp_iter iter;
1135a6a0b05dSBen Gardon 	u64 new_spte;
1136a6a0b05dSBen Gardon 
11377cca2d0bSBen Gardon 	rcu_read_lock();
11387cca2d0bSBen Gardon 
1139a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1140a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1141a6a0b05dSBen Gardon 		if (!mask)
1142a6a0b05dSBen Gardon 			break;
1143a6a0b05dSBen Gardon 
1144a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1145a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1146a6a0b05dSBen Gardon 			continue;
1147a6a0b05dSBen Gardon 
1148f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1149f1b3b06aSBen Gardon 
1150a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1151a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1152a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1153a6a0b05dSBen Gardon 			else
1154a6a0b05dSBen Gardon 				continue;
1155a6a0b05dSBen Gardon 		} else {
1156a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1157a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1158a6a0b05dSBen Gardon 			else
1159a6a0b05dSBen Gardon 				continue;
1160a6a0b05dSBen Gardon 		}
1161a6a0b05dSBen Gardon 
1162a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1163a6a0b05dSBen Gardon 	}
11647cca2d0bSBen Gardon 
11657cca2d0bSBen Gardon 	rcu_read_unlock();
1166a6a0b05dSBen Gardon }
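
/*
 * Worked example of the gfn/mask window above (illustrative values): for
 * gfn == 0x1000 and mask == 0x30, __ffs(mask) == 4, so the walk starts at
 * GFN 0x1004 and spans at most BITS_PER_LONG GFNs; only GFNs 0x1004 and
 * 0x1005 have their mask bits set, and the loop breaks as soon as mask is
 * fully consumed.
 */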
1167a6a0b05dSBen Gardon 
1168a6a0b05dSBen Gardon /*
1169a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1170a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1171a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1172a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1173a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1174a6a0b05dSBen Gardon  */
1175a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1176a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1177a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1178a6a0b05dSBen Gardon 				       bool wrprot)
1179a6a0b05dSBen Gardon {
1180a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1181a6a0b05dSBen Gardon 
1182531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1183a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1184a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1185a6a0b05dSBen Gardon }
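
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * clearing dirty state for one 64-GFN word of a slot's dirty bitmap while
 * mmu_lock is already held for write ('word' and 'wrprot' are assumed
 * caller-provided values):
 *
 *	gfn_t base_gfn = slot->base_gfn + word * BITS_PER_LONG;
 *	unsigned long mask = slot->dirty_bitmap[word];
 *
 *	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, base_gfn, mask, wrprot);
 */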
1186a6a0b05dSBen Gardon 
1187a6a0b05dSBen Gardon /*
118887aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
118987aa9ec9SBen Gardon  * GFNs within the slot.
119014881998SBen Gardon  */
1191af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
119214881998SBen Gardon 				       struct kvm_mmu_page *root,
1193af95b53eSSean Christopherson 				       struct kvm_memory_slot *slot,
1194af95b53eSSean Christopherson 				       bool flush)
119514881998SBen Gardon {
11969eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
11979eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
119814881998SBen Gardon 	struct tdp_iter iter;
119914881998SBen Gardon 	kvm_pfn_t pfn;
120014881998SBen Gardon 
12017cca2d0bSBen Gardon 	rcu_read_lock();
12027cca2d0bSBen Gardon 
120314881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
1204af95b53eSSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
1205af95b53eSSean Christopherson 			flush = false;
12061af4a960SBen Gardon 			continue;
12071af4a960SBen Gardon 		}
12081af4a960SBen Gardon 
120914881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
121087aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
121114881998SBen Gardon 			continue;
121214881998SBen Gardon 
121314881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
121414881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
12159eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
12169eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
121714881998SBen Gardon 			continue;
121814881998SBen Gardon 
121914881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
122014881998SBen Gardon 
1221af95b53eSSean Christopherson 		flush = true;
122214881998SBen Gardon 	}
122314881998SBen Gardon 
12247cca2d0bSBen Gardon 	rcu_read_unlock();
1225af95b53eSSean Christopherson 
1226af95b53eSSean Christopherson 	return flush;
122714881998SBen Gardon }
122814881998SBen Gardon 
122914881998SBen Gardon /*
123014881998SBen Gardon  * Zap the leaf entries which could be replaced by large mappings, for
123114881998SBen Gardon  * GFNs within the slot, so that subsequent faults can map them large again.
123214881998SBen Gardon  */
1233142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1234142ccde1SSean Christopherson 				       struct kvm_memory_slot *slot, bool flush)
123514881998SBen Gardon {
123614881998SBen Gardon 	struct kvm_mmu_page *root;
123714881998SBen Gardon 
1238a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1239af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1240af95b53eSSean Christopherson 
1241142ccde1SSean Christopherson 	return flush;
124214881998SBen Gardon }
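
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * once dirty logging is disabled for a slot, the small mappings it forced
 * can be zapped so that subsequent faults rebuild huge mappings:
 *
 *	write_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */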
124346044f72SBen Gardon 
124446044f72SBen Gardon /*
124546044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12465fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
124746044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
124846044f72SBen Gardon  */
124946044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
125046044f72SBen Gardon 			      gfn_t gfn)
125146044f72SBen Gardon {
125246044f72SBen Gardon 	struct tdp_iter iter;
125346044f72SBen Gardon 	u64 new_spte;
125446044f72SBen Gardon 	bool spte_set = false;
125546044f72SBen Gardon 
12567cca2d0bSBen Gardon 	rcu_read_lock();
12577cca2d0bSBen Gardon 
125846044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
125946044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
126046044f72SBen Gardon 			break;
126146044f72SBen Gardon 
126246044f72SBen Gardon 		new_spte = iter.old_spte &
12635fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
126446044f72SBen Gardon 
126546044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
126646044f72SBen Gardon 		spte_set = true;
126746044f72SBen Gardon 	}
126846044f72SBen Gardon 
12697cca2d0bSBen Gardon 	rcu_read_unlock();
12707cca2d0bSBen Gardon 
127146044f72SBen Gardon 	return spte_set;
127246044f72SBen Gardon }
127346044f72SBen Gardon 
127446044f72SBen Gardon /*
127546044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12765fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
127746044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
127846044f72SBen Gardon  */
127946044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
128046044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
128146044f72SBen Gardon {
128246044f72SBen Gardon 	struct kvm_mmu_page *root;
128346044f72SBen Gardon 	bool spte_set = false;
128446044f72SBen Gardon 
1285531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1286a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
128746044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
1288a3f15bdaSSean Christopherson 
128946044f72SBen Gardon 	return spte_set;
129046044f72SBen Gardon }
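
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * intercepting writes to a single tracked guest page, with mmu_lock held
 * for write, flushing only that GFN if an SPTE was changed:
 *
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn))
 *		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
 */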
129146044f72SBen Gardon 
129295fb5b02SBen Gardon /*
129395fb5b02SBen Gardon  * Return the level of the lowest-level SPTE added to sptes, or -1 if the
129495fb5b02SBen Gardon  * walk visited no SPTEs. That SPTE may be non-present.
129595fb5b02SBen Gardon  */
129639b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
129739b4d43eSSean Christopherson 			 int *root_level)
129895fb5b02SBen Gardon {
129995fb5b02SBen Gardon 	struct tdp_iter iter;
130095fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
130195fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
13022aa07893SSean Christopherson 	int leaf = -1;
130395fb5b02SBen Gardon 
130439b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
130595fb5b02SBen Gardon 
13067cca2d0bSBen Gardon 	rcu_read_lock();
13077cca2d0bSBen Gardon 
130895fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
130995fb5b02SBen Gardon 		leaf = iter.level;
1310dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
131195fb5b02SBen Gardon 	}
131295fb5b02SBen Gardon 
13137cca2d0bSBen Gardon 	rcu_read_unlock();
13147cca2d0bSBen Gardon 
131595fb5b02SBen Gardon 	return leaf;
131695fb5b02SBen Gardon }
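
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * dumping the SPTEs encountered while translating a guest physical
 * address, from the root level down to the lowest level walked:
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf, level;
 *
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, &root_level);
 *	for (level = root_level; leaf > 0 && level >= leaf; level--)
 *		pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
 */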
1317