xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 9eba50f8)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
30fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
31fe5db27dSBen Gardon {
32fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
33fe5db27dSBen Gardon 		return;
3402c00b3aSBen Gardon 
3502c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
367cca2d0bSBen Gardon 
377cca2d0bSBen Gardon 	/*
387cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
397cca2d0bSBen Gardon 	 * can run before the VM is torn down.
407cca2d0bSBen Gardon 	 */
417cca2d0bSBen Gardon 	rcu_barrier();
4202c00b3aSBen Gardon }
4302c00b3aSBen Gardon 
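/*
 * Drop a reference to @root and, if that was the last reference, tear the
 * root down and free it.
 */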
44a889ea54SBen Gardon static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
45a889ea54SBen Gardon {
46a889ea54SBen Gardon 	if (kvm_mmu_put_root(kvm, root))
47a889ea54SBen Gardon 		kvm_tdp_mmu_free_root(kvm, root);
48a889ea54SBen Gardon }
49a889ea54SBen Gardon 
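/*
 * Returns true and takes a reference on @root if it is a real root rather
 * than the list head, so the caller may safely yield the MMU lock while
 * operating on it. Returns false once the end of the root list is reached.
 */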
50a889ea54SBen Gardon static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
51a889ea54SBen Gardon 					   struct kvm_mmu_page *root)
52a889ea54SBen Gardon {
53531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
54a889ea54SBen Gardon 
55a889ea54SBen Gardon 	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
56a889ea54SBen Gardon 		return false;
57a889ea54SBen Gardon 
58a889ea54SBen Gardon 	kvm_mmu_get_root(kvm, root);
59a889ea54SBen Gardon 	return true;
61a889ea54SBen Gardon }
62a889ea54SBen Gardon 
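/*
 * Advance to the next root in the list and drop the reference that
 * tdp_mmu_next_root_valid() took on the current root.
 */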
63a889ea54SBen Gardon static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
64a889ea54SBen Gardon 						     struct kvm_mmu_page *root)
65a889ea54SBen Gardon {
66a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
67a889ea54SBen Gardon 
68a889ea54SBen Gardon 	next_root = list_next_entry(root, link);
69a889ea54SBen Gardon 	tdp_mmu_put_root(kvm, root);
70a889ea54SBen Gardon 	return next_root;
71a889ea54SBen Gardon }
72a889ea54SBen Gardon 
73a889ea54SBen Gardon /*
74a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
75a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
76a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
77a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
78a889ea54SBen Gardon  */
79a889ea54SBen Gardon #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
80a889ea54SBen Gardon 	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
81a889ea54SBen Gardon 				      typeof(*_root), link);		\
82a889ea54SBen Gardon 	     tdp_mmu_next_root_valid(_kvm, _root);			\
83a889ea54SBen Gardon 	     _root = tdp_mmu_next_root(_kvm, _root))
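
/*
 * Minimal usage sketch (not from the original source; should_stop() is a
 * hypothetical predicate): a caller that exits the loop early must drop the
 * reference the iterator holds on the current root, e.g.:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 *		if (should_stop(root)) {
 *			tdp_mmu_put_root(kvm, root);
 *			break;
 *		}
 *	}
 */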
84a889ea54SBen Gardon 
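/*
 * Iterate over all TDP MMU roots without taking references. Only safe while
 * the MMU lock is held across the entire walk.
 */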
8502c00b3aSBen Gardon #define for_each_tdp_mmu_root(_kvm, _root)				\
8602c00b3aSBen Gardon 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
8702c00b3aSBen Gardon 
88faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
89063afacdSBen Gardon 			  gfn_t start, gfn_t end, bool can_yield);
90faaf05b0SBen Gardon 
9102c00b3aSBen Gardon void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
9202c00b3aSBen Gardon {
93339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
94faaf05b0SBen Gardon 
95531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
9602c00b3aSBen Gardon 
9702c00b3aSBen Gardon 	WARN_ON(root->root_count);
9802c00b3aSBen Gardon 	WARN_ON(!root->tdp_mmu_page);
9902c00b3aSBen Gardon 
10002c00b3aSBen Gardon 	list_del(&root->link);
10102c00b3aSBen Gardon 
102063afacdSBen Gardon 	zap_gfn_range(kvm, root, 0, max_gfn, false);
103faaf05b0SBen Gardon 
10402c00b3aSBen Gardon 	free_page((unsigned long)root->spt);
10502c00b3aSBen Gardon 	kmem_cache_free(mmu_page_header_cache, root);
10602c00b3aSBen Gardon }
10702c00b3aSBen Gardon 
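/*
 * Compute the role for a TDP MMU page at @level: a direct mapping with
 * 8-byte PTEs and full access rights, derived from the vCPU's base MMU role.
 */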
10802c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
10902c00b3aSBen Gardon 						   int level)
11002c00b3aSBen Gardon {
11102c00b3aSBen Gardon 	union kvm_mmu_page_role role;
11202c00b3aSBen Gardon 
11302c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
11402c00b3aSBen Gardon 	role.level = level;
11502c00b3aSBen Gardon 	role.direct = true;
11602c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
11702c00b3aSBen Gardon 	role.access = ACC_ALL;
11802c00b3aSBen Gardon 
11902c00b3aSBen Gardon 	return role;
12002c00b3aSBen Gardon }
12102c00b3aSBen Gardon 
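/*
 * Allocate a new TDP MMU shadow page and its backing page table page from the
 * vCPU's pre-filled MMU memory caches, and initialize its role and gfn.
 */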
12202c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
12302c00b3aSBen Gardon 					       int level)
12402c00b3aSBen Gardon {
12502c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
12602c00b3aSBen Gardon 
12702c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
12802c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
12902c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
13002c00b3aSBen Gardon 
13102c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
13202c00b3aSBen Gardon 	sp->gfn = gfn;
13302c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
13402c00b3aSBen Gardon 
13533dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
13633dd3574SBen Gardon 
13702c00b3aSBen Gardon 	return sp;
13802c00b3aSBen Gardon }
13902c00b3aSBen Gardon 
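/*
 * Return a root for the vCPU's current MMU role, reusing (and taking a
 * reference on) an existing root if one matches, otherwise allocating a new
 * one and adding it to the root list. Takes the MMU lock in write mode.
 */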
14002c00b3aSBen Gardon static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
14102c00b3aSBen Gardon {
14202c00b3aSBen Gardon 	union kvm_mmu_page_role role;
14302c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
14402c00b3aSBen Gardon 	struct kvm_mmu_page *root;
14502c00b3aSBen Gardon 
14602c00b3aSBen Gardon 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
14702c00b3aSBen Gardon 
148531810caSBen Gardon 	write_lock(&kvm->mmu_lock);
14902c00b3aSBen Gardon 
15002c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
15102c00b3aSBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
15202c00b3aSBen Gardon 		if (root->role.word == role.word) {
15302c00b3aSBen Gardon 			kvm_mmu_get_root(kvm, root);
154531810caSBen Gardon 			write_unlock(&kvm->mmu_lock);
15502c00b3aSBen Gardon 			return root;
15602c00b3aSBen Gardon 		}
15702c00b3aSBen Gardon 	}
15802c00b3aSBen Gardon 
15902c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
16002c00b3aSBen Gardon 	root->root_count = 1;
16102c00b3aSBen Gardon 
16202c00b3aSBen Gardon 	list_add(&root->link, &kvm->arch.tdp_mmu_roots);
16302c00b3aSBen Gardon 
164531810caSBen Gardon 	write_unlock(&kvm->mmu_lock);
16502c00b3aSBen Gardon 
16602c00b3aSBen Gardon 	return root;
16702c00b3aSBen Gardon }
16802c00b3aSBen Gardon 
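/* Return the physical address of the page table backing the vCPU's root. */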
16902c00b3aSBen Gardon hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
17002c00b3aSBen Gardon {
17102c00b3aSBen Gardon 	struct kvm_mmu_page *root;
17202c00b3aSBen Gardon 
17302c00b3aSBen Gardon 	root = get_tdp_mmu_vcpu_root(vcpu);
17402c00b3aSBen Gardon 	if (!root)
17502c00b3aSBen Gardon 		return INVALID_PAGE;
17602c00b3aSBen Gardon 
17702c00b3aSBen Gardon 	return __pa(root->spt);
178fe5db27dSBen Gardon }
1792f2fad08SBen Gardon 
1807cca2d0bSBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
1817cca2d0bSBen Gardon {
1827cca2d0bSBen Gardon 	free_page((unsigned long)sp->spt);
1837cca2d0bSBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
1847cca2d0bSBen Gardon }
1857cca2d0bSBen Gardon 
1867cca2d0bSBen Gardon /*
1877cca2d0bSBen Gardon  * This is called through call_rcu in order to free TDP page table memory
1887cca2d0bSBen Gardon  * safely with respect to other kernel threads that may be operating on
1897cca2d0bSBen Gardon  * the memory.
1907cca2d0bSBen Gardon  * By only accessing TDP MMU page table memory in an RCU read critical
1917cca2d0bSBen Gardon  * section, and freeing it after a grace period, lockless access to that
1927cca2d0bSBen Gardon  * memory won't use it after it is freed.
1937cca2d0bSBen Gardon  */
1947cca2d0bSBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
1957cca2d0bSBen Gardon {
1967cca2d0bSBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
1977cca2d0bSBen Gardon 					       rcu_head);
1987cca2d0bSBen Gardon 
1997cca2d0bSBen Gardon 	tdp_mmu_free_sp(sp);
2007cca2d0bSBen Gardon }
2017cca2d0bSBen Gardon 
2022f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2039a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2049a77daacSBen Gardon 				bool shared);
2052f2fad08SBen Gardon 
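/* Return the address space ID (non-SMM = 0, SMM = 1) the page belongs to. */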
206faaf05b0SBen Gardon static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
207faaf05b0SBen Gardon {
208faaf05b0SBen Gardon 	return sp->role.smm ? 1 : 0;
209faaf05b0SBen Gardon }
210faaf05b0SBen Gardon 
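/*
 * If a present leaf SPTE was marked accessed and is being zapped, remapped to
 * a different PFN, or losing its accessed bit, propagate the access to the
 * primary MM via kvm_set_pfn_accessed().
 */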
211f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
212f8e14497SBen Gardon {
213f8e14497SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
214f8e14497SBen Gardon 
215f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
216f8e14497SBen Gardon 		return;
217f8e14497SBen Gardon 
218f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
219f8e14497SBen Gardon 	    (!is_accessed_spte(new_spte) || pfn_changed))
220f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
221f8e14497SBen Gardon }
222f8e14497SBen Gardon 
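/*
 * If a 4K SPTE has just become writable, or now maps a different writable
 * PFN, mark the corresponding GFN dirty in its memslot's dirty bitmap.
 */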
223a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
224a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
225a6a0b05dSBen Gardon {
226a6a0b05dSBen Gardon 	bool pfn_changed;
227a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
228a6a0b05dSBen Gardon 
229a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
230a6a0b05dSBen Gardon 		return;
231a6a0b05dSBen Gardon 
232a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
233a6a0b05dSBen Gardon 
234a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
235a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
236a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
237fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
238a6a0b05dSBen Gardon 	}
239a6a0b05dSBen Gardon }
240a6a0b05dSBen Gardon 
2412f2fad08SBen Gardon /**
242a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
243a9442f59SBen Gardon  *
244a9442f59SBen Gardon  * @kvm: kvm instance
245a9442f59SBen Gardon  * @sp: the new page
2469a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2479a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2489a77daacSBen Gardon  *	    threads that might be adding or removing pages.
249a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
250a9442f59SBen Gardon  *		eventual reclaim.
251a9442f59SBen Gardon  */
252a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2539a77daacSBen Gardon 			      bool shared, bool account_nx)
254a9442f59SBen Gardon {
2559a77daacSBen Gardon 	if (shared)
2569a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2579a77daacSBen Gardon 	else
258a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
259a9442f59SBen Gardon 
260a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
261a9442f59SBen Gardon 	if (account_nx)
262a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2639a77daacSBen Gardon 
2649a77daacSBen Gardon 	if (shared)
2659a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
266a9442f59SBen Gardon }
267a9442f59SBen Gardon 
268a9442f59SBen Gardon /**
269a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
270a9442f59SBen Gardon  *
271a9442f59SBen Gardon  * @kvm: kvm instance
272a9442f59SBen Gardon  * @sp: the page to be removed
2739a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2749a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2759a77daacSBen Gardon  *	    threads that might be adding or removing pages.
276a9442f59SBen Gardon  */
2779a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2789a77daacSBen Gardon 				bool shared)
279a9442f59SBen Gardon {
2809a77daacSBen Gardon 	if (shared)
2819a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2829a77daacSBen Gardon 	else
283a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
284a9442f59SBen Gardon 
285a9442f59SBen Gardon 	list_del(&sp->link);
286a9442f59SBen Gardon 	if (sp->lpage_disallowed)
287a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2889a77daacSBen Gardon 
2899a77daacSBen Gardon 	if (shared)
2909a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
291a9442f59SBen Gardon }
292a9442f59SBen Gardon 
293a9442f59SBen Gardon /**
294a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
295a066e61fSBen Gardon  *
296a066e61fSBen Gardon  * @kvm: kvm instance
297a066e61fSBen Gardon  * @pt: the page removed from the paging structure
2989a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
2999a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
3009a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
301a066e61fSBen Gardon  *
302a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
303a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
304a066e61fSBen Gardon  */
3059a77daacSBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
3069a77daacSBen Gardon 					bool shared)
307a066e61fSBen Gardon {
308a066e61fSBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(pt);
309a066e61fSBen Gardon 	int level = sp->role.level;
310e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
311a066e61fSBen Gardon 	u64 old_child_spte;
3129a77daacSBen Gardon 	u64 *sptep;
313e25f0e0cSBen Gardon 	gfn_t gfn;
314a066e61fSBen Gardon 	int i;
315a066e61fSBen Gardon 
316a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
317a066e61fSBen Gardon 
3189a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
319a066e61fSBen Gardon 
320a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
3219a77daacSBen Gardon 		sptep = pt + i;
322e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3239a77daacSBen Gardon 
3249a77daacSBen Gardon 		if (shared) {
325e25f0e0cSBen Gardon 			/*
326e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
327e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
328e25f0e0cSBen Gardon 			 * already marked as removed then another thread
329e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
330e25f0e0cSBen Gardon 			 * retry the exchange until the SPTE changes from
331e25f0e0cSBen Gardon 			 * some other value to the removed SPTE value.
332e25f0e0cSBen Gardon 			 */
333e25f0e0cSBen Gardon 			for (;;) {
334e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
335e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
336e25f0e0cSBen Gardon 					break;
337e25f0e0cSBen Gardon 				cpu_relax();
338e25f0e0cSBen Gardon 			}
3399a77daacSBen Gardon 		} else {
3409a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
341e25f0e0cSBen Gardon 
342e25f0e0cSBen Gardon 			/*
343e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
344e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
345e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
346e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
347e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
348e25f0e0cSBen Gardon 			 * the function.
349e25f0e0cSBen Gardon 			 */
350e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3519a77daacSBen Gardon 		}
352e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
353e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
354e25f0e0cSBen Gardon 				    shared);
355a066e61fSBen Gardon 	}
356a066e61fSBen Gardon 
357a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
358a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
359a066e61fSBen Gardon 
3607cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
361a066e61fSBen Gardon }
362a066e61fSBen Gardon 
363a066e61fSBen Gardon /**
3642f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3652f2fad08SBen Gardon  * @kvm: kvm instance
3662f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3672f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3682f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3692f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3702f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3719a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3729a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3739a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3742f2fad08SBen Gardon  *
3752f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3762f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3772f2fad08SBen Gardon  */
3782f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3799a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3809a77daacSBen Gardon 				  bool shared)
3812f2fad08SBen Gardon {
3822f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3832f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
3842f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
3852f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
3862f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
3872f2fad08SBen Gardon 
3882f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
3892f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
390764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
3912f2fad08SBen Gardon 
3922f2fad08SBen Gardon 	/*
3932f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
3942f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
3952f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
3962f2fad08SBen Gardon 	 * present leaf SPTE pointing to a different PFN. A notifier handler
3972f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
3982f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
3992f2fad08SBen Gardon 	 * thread before replacement.
4002f2fad08SBen Gardon 	 */
4012f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4022f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4032f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4042f2fad08SBen Gardon 		       "different PFN!\n"
4052f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4062f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4072f2fad08SBen Gardon 
4082f2fad08SBen Gardon 		/*
4092f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4102f2fad08SBen Gardon 		 * corruption.
4112f2fad08SBen Gardon 		 */
4122f2fad08SBen Gardon 		BUG();
4132f2fad08SBen Gardon 	}
4142f2fad08SBen Gardon 
4152f2fad08SBen Gardon 	if (old_spte == new_spte)
4162f2fad08SBen Gardon 		return;
4172f2fad08SBen Gardon 
418b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
419b9a98c34SBen Gardon 
4202f2fad08SBen Gardon 	/*
4212f2fad08SBen Gardon 	 * The only time a SPTE should be changed from a non-present to
4222f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4232f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4242f2fad08SBen Gardon 	 */
4252f2fad08SBen Gardon 	if (!was_present && !is_present) {
4262f2fad08SBen Gardon 		/*
42708f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
42808f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
42908f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
43008f07c80SBen Gardon 		 * are nonpresent.
4312f2fad08SBen Gardon 		 */
43208f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
43308f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
43408f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4352f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4362f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4372f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
43808f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
43908f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4402f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4412f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4422f2fad08SBen Gardon 		return;
4432f2fad08SBen Gardon 	}
4442f2fad08SBen Gardon 
4452f2fad08SBen Gardon 
4462f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
4472f2fad08SBen Gardon 	    (!is_dirty_spte(new_spte) || pfn_changed))
4482f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4492f2fad08SBen Gardon 
4502f2fad08SBen Gardon 	/*
4512f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4522f2fad08SBen Gardon 	 * the paging structure.
4532f2fad08SBen Gardon 	 */
454a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
455a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4569a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4572f2fad08SBen Gardon }
4582f2fad08SBen Gardon 
4592f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4609a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4619a77daacSBen Gardon 				bool shared)
4622f2fad08SBen Gardon {
4639a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4649a77daacSBen Gardon 			      shared);
465f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
466a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
467a6a0b05dSBen Gardon 				      new_spte, level);
4682f2fad08SBen Gardon }
469faaf05b0SBen Gardon 
470fe43fa2fSBen Gardon /*
4719a77daacSBen Gardon  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
4729a77daacSBen Gardon  * associated bookkeeping
4739a77daacSBen Gardon  *
4749a77daacSBen Gardon  * @kvm: kvm instance
4759a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
4769a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
4779a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
4789a77daacSBen Gardon  *	    this function will have no side-effects.
4799a77daacSBen Gardon  */
4809a77daacSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
4819a77daacSBen Gardon 					   struct tdp_iter *iter,
4829a77daacSBen Gardon 					   u64 new_spte)
4839a77daacSBen Gardon {
4849a77daacSBen Gardon 	u64 *root_pt = tdp_iter_root_pt(iter);
4859a77daacSBen Gardon 	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
4869a77daacSBen Gardon 	int as_id = kvm_mmu_page_as_id(root);
4879a77daacSBen Gardon 
4889a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
4899a77daacSBen Gardon 
49008f07c80SBen Gardon 	/*
49108f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
49208f07c80SBen Gardon 	 * may modify it.
49308f07c80SBen Gardon 	 */
49408f07c80SBen Gardon 	if (iter->old_spte == REMOVED_SPTE)
49508f07c80SBen Gardon 		return false;
49608f07c80SBen Gardon 
4979a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
4989a77daacSBen Gardon 		      new_spte) != iter->old_spte)
4999a77daacSBen Gardon 		return false;
5009a77daacSBen Gardon 
5019a77daacSBen Gardon 	handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
5029a77daacSBen Gardon 			    iter->level, true);
5039a77daacSBen Gardon 
5049a77daacSBen Gardon 	return true;
5059a77daacSBen Gardon }
5069a77daacSBen Gardon 
50708f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
50808f07c80SBen Gardon 					   struct tdp_iter *iter)
50908f07c80SBen Gardon {
51008f07c80SBen Gardon 	/*
51108f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
51208f07c80SBen Gardon 	 * non-present value. This will stop other threads from
51308f07c80SBen Gardon 	 * immediately installing a present entry in its place
51408f07c80SBen Gardon 	 * before the TLBs are flushed.
51508f07c80SBen Gardon 	 */
51608f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
51708f07c80SBen Gardon 		return false;
51808f07c80SBen Gardon 
51908f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
52008f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
52108f07c80SBen Gardon 
52208f07c80SBen Gardon 	/*
52308f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
52408f07c80SBen Gardon 	 * must either wait on the MMU lock or use
52508f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overwrite the
52608f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
52708f07c80SBen Gardon 	 * here since the SPTE is going from non-present
52808f07c80SBen Gardon 	 * to non-present.
52908f07c80SBen Gardon 	 */
53008f07c80SBen Gardon 	WRITE_ONCE(*iter->sptep, 0);
53108f07c80SBen Gardon 
53208f07c80SBen Gardon 	return true;
53308f07c80SBen Gardon }
53408f07c80SBen Gardon 
5359a77daacSBen Gardon 
5369a77daacSBen Gardon /*
537fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
538fe43fa2fSBen Gardon  * @kvm: kvm instance
539fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
540fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
541fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
542fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
543fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
544fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
545fe43fa2fSBen Gardon  *		      double counted.
546fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
547fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
548fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
549fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
550fe43fa2fSBen Gardon  *		      writes from being double counted.
551fe43fa2fSBen Gardon  */
552f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
553a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
554a6a0b05dSBen Gardon 				      bool record_dirty_log)
555faaf05b0SBen Gardon {
5567cca2d0bSBen Gardon 	tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
557faaf05b0SBen Gardon 	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
558faaf05b0SBen Gardon 	int as_id = kvm_mmu_page_as_id(root);
559faaf05b0SBen Gardon 
560531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5613a9a4aa5SBen Gardon 
56208f07c80SBen Gardon 	/*
56308f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
56408f07c80SBen Gardon 	 * temporary removed SPTE value.
56508f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
56608f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
56708f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
56808f07c80SBen Gardon 	 */
56908f07c80SBen Gardon 	WARN_ON(iter->old_spte == REMOVED_SPTE);
57008f07c80SBen Gardon 
5717cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
572faaf05b0SBen Gardon 
573f8e14497SBen Gardon 	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
5749a77daacSBen Gardon 			      iter->level, false);
575f8e14497SBen Gardon 	if (record_acc_track)
576f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
577f8e14497SBen Gardon 					      iter->level);
578a6a0b05dSBen Gardon 	if (record_dirty_log)
579a6a0b05dSBen Gardon 		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
580a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
581a6a0b05dSBen Gardon 					      iter->level);
582f8e14497SBen Gardon }
583f8e14497SBen Gardon 
584f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
585f8e14497SBen Gardon 				    u64 new_spte)
586f8e14497SBen Gardon {
587a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
588f8e14497SBen Gardon }
589f8e14497SBen Gardon 
590f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
591f8e14497SBen Gardon 						 struct tdp_iter *iter,
592f8e14497SBen Gardon 						 u64 new_spte)
593f8e14497SBen Gardon {
594a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
595a6a0b05dSBen Gardon }
596a6a0b05dSBen Gardon 
597a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
598a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
599a6a0b05dSBen Gardon 						 u64 new_spte)
600a6a0b05dSBen Gardon {
601a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
602faaf05b0SBen Gardon }
603faaf05b0SBen Gardon 
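/*
 * Iteration helpers: walk all SPTEs (or only present leaf SPTEs) under a
 * given root, or walk the paging structure rooted at the vCPU's current
 * root_hpa.
 */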
604faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
605faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
606faaf05b0SBen Gardon 
607f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
608f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
609f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
610f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
611f8e14497SBen Gardon 			continue;					\
612f8e14497SBen Gardon 		else
613f8e14497SBen Gardon 
614bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
615bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
616bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
617bb18842eSBen Gardon 
618faaf05b0SBen Gardon /*
619e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
620e28a436cSBen Gardon  * to the scheduler.
621e28a436cSBen Gardon  *
622e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
623e139a34eSBen Gardon  * TLB flush before yielding.
624e139a34eSBen Gardon  *
625e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
626ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
627ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
628ed5e484bSBen Gardon  * paging structure root.
629e28a436cSBen Gardon  *
630e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
631e28a436cSBen Gardon  * Return false if a yield was not needed.
632e28a436cSBen Gardon  */
633e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
634e139a34eSBen Gardon 					     struct tdp_iter *iter, bool flush)
635a6a0b05dSBen Gardon {
636ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
637ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
638ed5e484bSBen Gardon 		return false;
639ed5e484bSBen Gardon 
640531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6417cca2d0bSBen Gardon 		rcu_read_unlock();
6427cca2d0bSBen Gardon 
643e139a34eSBen Gardon 		if (flush)
644e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
645e139a34eSBen Gardon 
646531810caSBen Gardon 		cond_resched_rwlock_write(&kvm->mmu_lock);
6477cca2d0bSBen Gardon 		rcu_read_lock();
648ed5e484bSBen Gardon 
649ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
650ed5e484bSBen Gardon 
651ed5e484bSBen Gardon 		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
652ed5e484bSBen Gardon 			       iter->root_level, iter->min_level,
653ed5e484bSBen Gardon 			       iter->next_last_level_gfn);
654ed5e484bSBen Gardon 
655e28a436cSBen Gardon 		return true;
656a6a0b05dSBen Gardon 	}
657e28a436cSBen Gardon 
658e28a436cSBen Gardon 	return false;
659a6a0b05dSBen Gardon }
660a6a0b05dSBen Gardon 
661faaf05b0SBen Gardon /*
662faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
663faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
664faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
665faaf05b0SBen Gardon  * MMU lock.
666063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
667063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
668063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
669063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
670063afacdSBen Gardon  * operation can cause a soft lockup.
671faaf05b0SBen Gardon  */
672faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
673063afacdSBen Gardon 			  gfn_t start, gfn_t end, bool can_yield)
674faaf05b0SBen Gardon {
675faaf05b0SBen Gardon 	struct tdp_iter iter;
676faaf05b0SBen Gardon 	bool flush_needed = false;
677faaf05b0SBen Gardon 
6787cca2d0bSBen Gardon 	rcu_read_lock();
6797cca2d0bSBen Gardon 
680faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
6811af4a960SBen Gardon 		if (can_yield &&
6821af4a960SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
6831af4a960SBen Gardon 			flush_needed = false;
6841af4a960SBen Gardon 			continue;
6851af4a960SBen Gardon 		}
6861af4a960SBen Gardon 
687faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
688faaf05b0SBen Gardon 			continue;
689faaf05b0SBen Gardon 
690faaf05b0SBen Gardon 		/*
691faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
692faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
693faaf05b0SBen Gardon 		 * lower level.
694faaf05b0SBen Gardon 		 */
695faaf05b0SBen Gardon 		if ((iter.gfn < start ||
696faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
697faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
698faaf05b0SBen Gardon 			continue;
699faaf05b0SBen Gardon 
700faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
7011af4a960SBen Gardon 		flush_needed = true;
702faaf05b0SBen Gardon 	}
7037cca2d0bSBen Gardon 
7047cca2d0bSBen Gardon 	rcu_read_unlock();
705faaf05b0SBen Gardon 	return flush_needed;
706faaf05b0SBen Gardon }
707faaf05b0SBen Gardon 
708faaf05b0SBen Gardon /*
709faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
710faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
711faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
712faaf05b0SBen Gardon  * MMU lock.
713faaf05b0SBen Gardon  */
714faaf05b0SBen Gardon bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
715faaf05b0SBen Gardon {
716faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
717faaf05b0SBen Gardon 	bool flush = false;
718faaf05b0SBen Gardon 
719a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root)
720063afacdSBen Gardon 		flush |= zap_gfn_range(kvm, root, start, end, true);
721faaf05b0SBen Gardon 
722faaf05b0SBen Gardon 	return flush;
723faaf05b0SBen Gardon }
724faaf05b0SBen Gardon 
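/*
 * Zap every GFN mapped by the TDP MMU and flush the TLBs if any SPTEs were
 * cleared.
 */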
725faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
726faaf05b0SBen Gardon {
727339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
728faaf05b0SBen Gardon 	bool flush;
729faaf05b0SBen Gardon 
730faaf05b0SBen Gardon 	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
731faaf05b0SBen Gardon 	if (flush)
732faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
733faaf05b0SBen Gardon }
734bb18842eSBen Gardon 
735bb18842eSBen Gardon /*
736bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
737bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
738bb18842eSBen Gardon  */
739bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
740bb18842eSBen Gardon 					  int map_writable,
741bb18842eSBen Gardon 					  struct tdp_iter *iter,
742bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
743bb18842eSBen Gardon {
744bb18842eSBen Gardon 	u64 new_spte;
745bb18842eSBen Gardon 	int ret = 0;
746bb18842eSBen Gardon 	int make_spte_ret = 0;
747bb18842eSBen Gardon 
7489a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
749bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
7509a77daacSBen Gardon 	else
751bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
752bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
753bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
754bb18842eSBen Gardon 					 &new_spte);
755bb18842eSBen Gardon 
756bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
757bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
7589a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
7599a77daacSBen Gardon 		return RET_PF_RETRY;
760bb18842eSBen Gardon 
761bb18842eSBen Gardon 	/*
762bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
763bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
764bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
765bb18842eSBen Gardon 	 */
766bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
767bb18842eSBen Gardon 		if (write)
768bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
769bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
770bb18842eSBen Gardon 	}
771bb18842eSBen Gardon 
772bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
7739a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
7749a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
7759a77daacSBen Gardon 				     new_spte);
776bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
7779a77daacSBen Gardon 	} else
7789a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
7799a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
780bb18842eSBen Gardon 
783bb18842eSBen Gardon 	if (!prefault)
784bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
785bb18842eSBen Gardon 
786bb18842eSBen Gardon 	return ret;
787bb18842eSBen Gardon }
788bb18842eSBen Gardon 
789bb18842eSBen Gardon /*
790bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
791bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
792bb18842eSBen Gardon  */
793bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
794bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
795bb18842eSBen Gardon 		    bool prefault)
796bb18842eSBen Gardon {
797bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
798bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
799bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
800bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
801bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
802bb18842eSBen Gardon 	struct tdp_iter iter;
80389c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
804bb18842eSBen Gardon 	u64 *child_pt;
805bb18842eSBen Gardon 	u64 new_spte;
806bb18842eSBen Gardon 	int ret;
807bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
808bb18842eSBen Gardon 	int level;
809bb18842eSBen Gardon 	int req_level;
810bb18842eSBen Gardon 
811bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
812bb18842eSBen Gardon 		return RET_PF_RETRY;
813bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
814bb18842eSBen Gardon 		return RET_PF_RETRY;
815bb18842eSBen Gardon 
816bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
817bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
818bb18842eSBen Gardon 
819bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
8207cca2d0bSBen Gardon 
8217cca2d0bSBen Gardon 	rcu_read_lock();
8227cca2d0bSBen Gardon 
823bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
824bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
825bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
826bb18842eSBen Gardon 						   iter.level, &pfn, &level);
827bb18842eSBen Gardon 
828bb18842eSBen Gardon 		if (iter.level == level)
829bb18842eSBen Gardon 			break;
830bb18842eSBen Gardon 
831bb18842eSBen Gardon 		/*
832bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
833bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
834bb18842eSBen Gardon 		 * with a non-leaf SPTE.
835bb18842eSBen Gardon 		 */
836bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
837bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
83808f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
8399a77daacSBen Gardon 				break;
840bb18842eSBen Gardon 
841bb18842eSBen Gardon 			/*
842bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
843bb18842eSBen Gardon 			 * because the new value informs the !present
844bb18842eSBen Gardon 			 * path below.
845bb18842eSBen Gardon 			 */
8467cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
847bb18842eSBen Gardon 		}
848bb18842eSBen Gardon 
849bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
85089c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
85189c0fd49SBen Gardon 			child_pt = sp->spt;
852a9442f59SBen Gardon 
853bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
854bb18842eSBen Gardon 						     !shadow_accessed_mask);
855bb18842eSBen Gardon 
8569a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
8579a77daacSBen Gardon 						    new_spte)) {
8589a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
8599a77daacSBen Gardon 						  huge_page_disallowed &&
8609a77daacSBen Gardon 						  req_level >= iter.level);
8619a77daacSBen Gardon 
862bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
8639a77daacSBen Gardon 			} else {
8649a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
8659a77daacSBen Gardon 				break;
8669a77daacSBen Gardon 			}
867bb18842eSBen Gardon 		}
868bb18842eSBen Gardon 	}
869bb18842eSBen Gardon 
8709a77daacSBen Gardon 	if (iter.level != level) {
8717cca2d0bSBen Gardon 		rcu_read_unlock();
872bb18842eSBen Gardon 		return RET_PF_RETRY;
8737cca2d0bSBen Gardon 	}
874bb18842eSBen Gardon 
875bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
876bb18842eSBen Gardon 					      pfn, prefault);
8777cca2d0bSBen Gardon 	rcu_read_unlock();
878bb18842eSBen Gardon 
879bb18842eSBen Gardon 	return ret;
880bb18842eSBen Gardon }
881063afacdSBen Gardon 
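/*
 * Invoke @handler on each root, for every memslot GFN range that intersects
 * the HVA range [start, end). This backs the MMU notifier callbacks for the
 * TDP MMU. Returns the bitwise OR of all handler return values.
 */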
8828f5c44f9SMaciej S. Szmigiero static __always_inline int
8838f5c44f9SMaciej S. Szmigiero kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
8848f5c44f9SMaciej S. Szmigiero 			     unsigned long start,
8858f5c44f9SMaciej S. Szmigiero 			     unsigned long end,
8868f5c44f9SMaciej S. Szmigiero 			     unsigned long data,
8878f5c44f9SMaciej S. Szmigiero 			     int (*handler)(struct kvm *kvm,
8888f5c44f9SMaciej S. Szmigiero 					    struct kvm_memory_slot *slot,
8898f5c44f9SMaciej S. Szmigiero 					    struct kvm_mmu_page *root,
8908f5c44f9SMaciej S. Szmigiero 					    gfn_t start,
8918f5c44f9SMaciej S. Szmigiero 					    gfn_t end,
8928f5c44f9SMaciej S. Szmigiero 					    unsigned long data))
893063afacdSBen Gardon {
894063afacdSBen Gardon 	struct kvm_memslots *slots;
895063afacdSBen Gardon 	struct kvm_memory_slot *memslot;
896063afacdSBen Gardon 	struct kvm_mmu_page *root;
897063afacdSBen Gardon 	int ret = 0;
898063afacdSBen Gardon 	int as_id;
899063afacdSBen Gardon 
900a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
901063afacdSBen Gardon 		as_id = kvm_mmu_page_as_id(root);
902063afacdSBen Gardon 		slots = __kvm_memslots(kvm, as_id);
903063afacdSBen Gardon 		kvm_for_each_memslot(memslot, slots) {
904063afacdSBen Gardon 			unsigned long hva_start, hva_end;
905063afacdSBen Gardon 			gfn_t gfn_start, gfn_end;
906063afacdSBen Gardon 
907063afacdSBen Gardon 			hva_start = max(start, memslot->userspace_addr);
908063afacdSBen Gardon 			hva_end = min(end, memslot->userspace_addr +
909063afacdSBen Gardon 				      (memslot->npages << PAGE_SHIFT));
910063afacdSBen Gardon 			if (hva_start >= hva_end)
911063afacdSBen Gardon 				continue;
912063afacdSBen Gardon 			/*
913063afacdSBen Gardon 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
914063afacdSBen Gardon 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
915063afacdSBen Gardon 			 */
916063afacdSBen Gardon 			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
917063afacdSBen Gardon 			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
918063afacdSBen Gardon 
919063afacdSBen Gardon 			ret |= handler(kvm, memslot, root, gfn_start,
920063afacdSBen Gardon 				       gfn_end, data);
921063afacdSBen Gardon 		}
922063afacdSBen Gardon 	}
923063afacdSBen Gardon 
924063afacdSBen Gardon 	return ret;
925063afacdSBen Gardon }
926063afacdSBen Gardon 
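/* MMU notifier unmap handler: zap the GFN range without yielding the lock. */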
927063afacdSBen Gardon static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
928063afacdSBen Gardon 				     struct kvm_memory_slot *slot,
929063afacdSBen Gardon 				     struct kvm_mmu_page *root, gfn_t start,
930063afacdSBen Gardon 				     gfn_t end, unsigned long unused)
931063afacdSBen Gardon {
932063afacdSBen Gardon 	return zap_gfn_range(kvm, root, start, end, false);
933063afacdSBen Gardon }
934063afacdSBen Gardon 
935063afacdSBen Gardon int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
936063afacdSBen Gardon 			      unsigned long end)
937063afacdSBen Gardon {
938063afacdSBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
939063afacdSBen Gardon 					    zap_gfn_range_hva_wrapper);
940063afacdSBen Gardon }
941f8e14497SBen Gardon 
942f8e14497SBen Gardon /*
943f8e14497SBen Gardon  * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
944f8e14497SBen Gardon  * non-zero if any of the GFNs in the range have been accessed.
945f8e14497SBen Gardon  */
946f8e14497SBen Gardon static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
947f8e14497SBen Gardon 			 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
948f8e14497SBen Gardon 			 unsigned long unused)
949f8e14497SBen Gardon {
950f8e14497SBen Gardon 	struct tdp_iter iter;
951f8e14497SBen Gardon 	int young = 0;
952f8e14497SBen Gardon 	u64 new_spte = 0;
953f8e14497SBen Gardon 
9547cca2d0bSBen Gardon 	rcu_read_lock();
9557cca2d0bSBen Gardon 
956f8e14497SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
957f8e14497SBen Gardon 		/*
958f8e14497SBen Gardon 		 * If we have a non-accessed entry we don't need to change the
959f8e14497SBen Gardon 		 * pte.
960f8e14497SBen Gardon 		 */
961f8e14497SBen Gardon 		if (!is_accessed_spte(iter.old_spte))
962f8e14497SBen Gardon 			continue;
963f8e14497SBen Gardon 
964f8e14497SBen Gardon 		new_spte = iter.old_spte;
965f8e14497SBen Gardon 
966f8e14497SBen Gardon 		if (spte_ad_enabled(new_spte)) {
967f8e14497SBen Gardon 			clear_bit((ffs(shadow_accessed_mask) - 1),
968f8e14497SBen Gardon 				  (unsigned long *)&new_spte);
969f8e14497SBen Gardon 		} else {
970f8e14497SBen Gardon 			/*
971f8e14497SBen Gardon 			 * Capture the dirty status of the page, so that it doesn't get
972f8e14497SBen Gardon 			 * lost when the SPTE is marked for access tracking.
973f8e14497SBen Gardon 			 */
974f8e14497SBen Gardon 			if (is_writable_pte(new_spte))
975f8e14497SBen Gardon 				kvm_set_pfn_dirty(spte_to_pfn(new_spte));
976f8e14497SBen Gardon 
977f8e14497SBen Gardon 			new_spte = mark_spte_for_access_track(new_spte);
978f8e14497SBen Gardon 		}
979a6a0b05dSBen Gardon 		new_spte &= ~shadow_dirty_mask;
980f8e14497SBen Gardon 
981f8e14497SBen Gardon 		tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
982f8e14497SBen Gardon 		young = 1;
98333dd3574SBen Gardon 
98433dd3574SBen Gardon 		trace_kvm_age_page(iter.gfn, iter.level, slot, young);
985f8e14497SBen Gardon 	}
986f8e14497SBen Gardon 
9877cca2d0bSBen Gardon 	rcu_read_unlock();
9887cca2d0bSBen Gardon 
989f8e14497SBen Gardon 	return young;
990f8e14497SBen Gardon }
991f8e14497SBen Gardon 
992f8e14497SBen Gardon int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
993f8e14497SBen Gardon 			      unsigned long end)
994f8e14497SBen Gardon {
995f8e14497SBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
996f8e14497SBen Gardon 					    age_gfn_range);
997f8e14497SBen Gardon }
998f8e14497SBen Gardon 
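/* Return 1 if any leaf SPTE mapping @gfn is marked accessed, otherwise 0. */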
999f8e14497SBen Gardon static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
1000f8e14497SBen Gardon 			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
1001f8e14497SBen Gardon 			unsigned long unused2)
1002f8e14497SBen Gardon {
1003f8e14497SBen Gardon 	struct tdp_iter iter;
1004f8e14497SBen Gardon 
1005f8e14497SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
1006f8e14497SBen Gardon 		if (is_accessed_spte(iter.old_spte))
1007f8e14497SBen Gardon 			return 1;
1008f8e14497SBen Gardon 
1009f8e14497SBen Gardon 	return 0;
1010f8e14497SBen Gardon }
1011f8e14497SBen Gardon 
1012f8e14497SBen Gardon int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
1013f8e14497SBen Gardon {
1014f8e14497SBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
1015f8e14497SBen Gardon 					    test_age_gfn);
1016f8e14497SBen Gardon }
10171d8dd6b3SBen Gardon 
10181d8dd6b3SBen Gardon /*
10191d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
10201d8dd6b3SBen Gardon  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
10211d8dd6b3SBen Gardon  * notifier.
10221d8dd6b3SBen Gardon  * Returns non-zero if a flush is needed before releasing the MMU lock.
10231d8dd6b3SBen Gardon  */
10241d8dd6b3SBen Gardon static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
10251d8dd6b3SBen Gardon 			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
10261d8dd6b3SBen Gardon 			unsigned long data)
10271d8dd6b3SBen Gardon {
10281d8dd6b3SBen Gardon 	struct tdp_iter iter;
10291d8dd6b3SBen Gardon 	pte_t *ptep = (pte_t *)data;
10301d8dd6b3SBen Gardon 	kvm_pfn_t new_pfn;
10311d8dd6b3SBen Gardon 	u64 new_spte;
10321d8dd6b3SBen Gardon 	int need_flush = 0;
10331d8dd6b3SBen Gardon 
10347cca2d0bSBen Gardon 	rcu_read_lock();
10357cca2d0bSBen Gardon 
10361d8dd6b3SBen Gardon 	WARN_ON(pte_huge(*ptep));
10371d8dd6b3SBen Gardon 
10381d8dd6b3SBen Gardon 	new_pfn = pte_pfn(*ptep);
10391d8dd6b3SBen Gardon 
10401d8dd6b3SBen Gardon 	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
10411d8dd6b3SBen Gardon 		if (iter.level != PG_LEVEL_4K)
10421d8dd6b3SBen Gardon 			continue;
10431d8dd6b3SBen Gardon 
10441d8dd6b3SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
10451d8dd6b3SBen Gardon 			break;
10461d8dd6b3SBen Gardon 
10471d8dd6b3SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
10481d8dd6b3SBen Gardon 
10491d8dd6b3SBen Gardon 		kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
10501d8dd6b3SBen Gardon 
10511d8dd6b3SBen Gardon 		if (!pte_write(*ptep)) {
10521d8dd6b3SBen Gardon 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
10531d8dd6b3SBen Gardon 					iter.old_spte, new_pfn);
10541d8dd6b3SBen Gardon 
10551d8dd6b3SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, new_spte);
10561d8dd6b3SBen Gardon 		}
10571d8dd6b3SBen Gardon 
10581d8dd6b3SBen Gardon 		need_flush = 1;
10591d8dd6b3SBen Gardon 	}
10601d8dd6b3SBen Gardon 
10611d8dd6b3SBen Gardon 	if (need_flush)
10621d8dd6b3SBen Gardon 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
10631d8dd6b3SBen Gardon 
10647cca2d0bSBen Gardon 	rcu_read_unlock();
10657cca2d0bSBen Gardon 
10661d8dd6b3SBen Gardon 	return 0;
10671d8dd6b3SBen Gardon }
10681d8dd6b3SBen Gardon 
10691d8dd6b3SBen Gardon int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
10701d8dd6b3SBen Gardon 			     pte_t *host_ptep)
10711d8dd6b3SBen Gardon {
10721d8dd6b3SBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
10731d8dd6b3SBen Gardon 					    (unsigned long)host_ptep,
10741d8dd6b3SBen Gardon 					    set_tdp_spte);
10751d8dd6b3SBen Gardon }
10761d8dd6b3SBen Gardon 
1077a6a0b05dSBen Gardon /*
1078a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs [start, end). Only
1079a6a0b05dSBen Gardon  * leaf SPTEs at or above min_level are write-protected.
1080a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1081a6a0b05dSBen Gardon  */
1082a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1083a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1084a6a0b05dSBen Gardon {
1085a6a0b05dSBen Gardon 	struct tdp_iter iter;
1086a6a0b05dSBen Gardon 	u64 new_spte;
1087a6a0b05dSBen Gardon 	bool spte_set = false;
1088a6a0b05dSBen Gardon 
10897cca2d0bSBen Gardon 	rcu_read_lock();
10907cca2d0bSBen Gardon 
1091a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1092a6a0b05dSBen Gardon 
1093a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1094a6a0b05dSBen Gardon 				   min_level, start, end) {
10951af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10961af4a960SBen Gardon 			continue;
10971af4a960SBen Gardon 
1098a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
10990f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
11000f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1101a6a0b05dSBen Gardon 			continue;
1102a6a0b05dSBen Gardon 
1103a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1104a6a0b05dSBen Gardon 
1105a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1106a6a0b05dSBen Gardon 		spte_set = true;
1107a6a0b05dSBen Gardon 	}
11087cca2d0bSBen Gardon 
11097cca2d0bSBen Gardon 	rcu_read_unlock();
1110a6a0b05dSBen Gardon 	return spte_set;
1111a6a0b05dSBen Gardon }
1112a6a0b05dSBen Gardon 
1113a6a0b05dSBen Gardon /*
1114a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1115a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1116a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1117a6a0b05dSBen Gardon  */
1118a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1119a6a0b05dSBen Gardon 			     int min_level)
1120a6a0b05dSBen Gardon {
1121a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1122a6a0b05dSBen Gardon 	int root_as_id;
1123a6a0b05dSBen Gardon 	bool spte_set = false;
1124a6a0b05dSBen Gardon 
1125a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
1126a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1127a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1128a6a0b05dSBen Gardon 			continue;
1129a6a0b05dSBen Gardon 
1130a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1131a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1132a6a0b05dSBen Gardon 	}
1133a6a0b05dSBen Gardon 
1134a6a0b05dSBen Gardon 	return spte_set;
1135a6a0b05dSBen Gardon }
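
/*
 * Illustrative sketch of a caller (hypothetical): write-protecting an entire
 * memslot at 4k granularity, as a dirty-logging enable path might do.  The
 * wrapper name is an assumption; the locking and flush pattern follows the
 * contract above (mmu_lock held for write, TLB flush iff an SPTE changed).
 */
static void example_wrprot_slot_for_dirty_logging(struct kvm *kvm,
						  struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}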
1136a6a0b05dSBen Gardon 
1137a6a0b05dSBen Gardon /*
1138a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1139a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1140a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1141a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1142a6a0b05dSBen Gardon  * be flushed.
1143a6a0b05dSBen Gardon  */
1144a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1145a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1146a6a0b05dSBen Gardon {
1147a6a0b05dSBen Gardon 	struct tdp_iter iter;
1148a6a0b05dSBen Gardon 	u64 new_spte;
1149a6a0b05dSBen Gardon 	bool spte_set = false;
1150a6a0b05dSBen Gardon 
11517cca2d0bSBen Gardon 	rcu_read_lock();
11527cca2d0bSBen Gardon 
1153a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
11541af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
11551af4a960SBen Gardon 			continue;
11561af4a960SBen Gardon 
1157a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1158a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1159a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1160a6a0b05dSBen Gardon 			else
1161a6a0b05dSBen Gardon 				continue;
1162a6a0b05dSBen Gardon 		} else {
1163a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1164a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1165a6a0b05dSBen Gardon 			else
1166a6a0b05dSBen Gardon 				continue;
1167a6a0b05dSBen Gardon 		}
1168a6a0b05dSBen Gardon 
1169a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1170a6a0b05dSBen Gardon 		spte_set = true;
1171a6a0b05dSBen Gardon 	}
11727cca2d0bSBen Gardon 
11737cca2d0bSBen Gardon 	rcu_read_unlock();
1174a6a0b05dSBen Gardon 	return spte_set;
1175a6a0b05dSBen Gardon }
1176a6a0b05dSBen Gardon 
1177a6a0b05dSBen Gardon /*
1178a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1179a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1180a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1181a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1182a6a0b05dSBen Gardon  * be flushed.
1183a6a0b05dSBen Gardon  */
1184a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1185a6a0b05dSBen Gardon {
1186a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1187a6a0b05dSBen Gardon 	int root_as_id;
1188a6a0b05dSBen Gardon 	bool spte_set = false;
1189a6a0b05dSBen Gardon 
1190a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
1191a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1192a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1193a6a0b05dSBen Gardon 			continue;
1194a6a0b05dSBen Gardon 
1195a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1196a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1197a6a0b05dSBen Gardon 	}
1198a6a0b05dSBen Gardon 
1199a6a0b05dSBen Gardon 	return spte_set;
1200a6a0b05dSBen Gardon }
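
/*
 * Illustrative sketch of a caller (hypothetical): clearing the dirty status
 * of a whole slot after its dirty bitmap has been harvested.  Whether the
 * dirty bit or the writable bit is cleared is decided per-SPTE above,
 * depending on whether the SPTE uses A/D bits.  The wrapper name is an
 * assumption; the flush is limited to the slot's GFN range.
 */
static void example_clear_slot_dirty_status(struct kvm *kvm,
					    struct kvm_memory_slot *slot)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_clear_dirty_slot(kvm, slot);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, slot->base_gfn,
						   slot->npages);
}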
1201a6a0b05dSBen Gardon 
1202a6a0b05dSBen Gardon /*
1203a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1204a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1205a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1206a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1207a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1208a6a0b05dSBen Gardon  */
1209a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1210a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1211a6a0b05dSBen Gardon {
1212a6a0b05dSBen Gardon 	struct tdp_iter iter;
1213a6a0b05dSBen Gardon 	u64 new_spte;
1214a6a0b05dSBen Gardon 
12157cca2d0bSBen Gardon 	rcu_read_lock();
12167cca2d0bSBen Gardon 
1217a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1218a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1219a6a0b05dSBen Gardon 		if (!mask)
1220a6a0b05dSBen Gardon 			break;
1221a6a0b05dSBen Gardon 
1222a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1223a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1224a6a0b05dSBen Gardon 			continue;
1225a6a0b05dSBen Gardon 
1226f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1227f1b3b06aSBen Gardon 
1228a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1229a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1230a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1231a6a0b05dSBen Gardon 			else
1232a6a0b05dSBen Gardon 				continue;
1233a6a0b05dSBen Gardon 		} else {
1234a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1235a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1236a6a0b05dSBen Gardon 			else
1237a6a0b05dSBen Gardon 				continue;
1238a6a0b05dSBen Gardon 		}
1239a6a0b05dSBen Gardon 
1240a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1241a6a0b05dSBen Gardon 	}
12427cca2d0bSBen Gardon 
12437cca2d0bSBen Gardon 	rcu_read_unlock();
1244a6a0b05dSBen Gardon }
1245a6a0b05dSBen Gardon 
1246a6a0b05dSBen Gardon /*
1247a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1248a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1249a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1250a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1251a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1252a6a0b05dSBen Gardon  */
1253a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1254a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1255a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1256a6a0b05dSBen Gardon 				       bool wrprot)
1257a6a0b05dSBen Gardon {
1258a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1259a6a0b05dSBen Gardon 	int root_as_id;
1260a6a0b05dSBen Gardon 
1261531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1262a6a0b05dSBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
1263a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1264a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1265a6a0b05dSBen Gardon 			continue;
1266a6a0b05dSBen Gardon 
1267a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1268a6a0b05dSBen Gardon 	}
1269a6a0b05dSBen Gardon }
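
/*
 * Illustrative sketch of the mask semantics (hypothetical wrapper and
 * values): bit N of @mask selects GFN @gfn + N, so a 64-bit mask covers one
 * 64-GFN aligned chunk of a slot's dirty bitmap.  Per the lockdep assertion
 * above, mmu_lock must be held for write; the caller owns any TLB flush.
 */
static void example_reprotect_dirty_chunk(struct kvm *kvm,
					  struct kvm_memory_slot *slot)
{
	/* Clears the dirty status of GFNs 0x1000 and 0x1002 only. */
	gfn_t chunk_base = 0x1000;
	unsigned long mask = 0x5;	/* bits 0 and 2 */

	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, chunk_base, mask, false);
}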
1270a6a0b05dSBen Gardon 
1271a6a0b05dSBen Gardon /*
1272a6a0b05dSBen Gardon  * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
1273a6a0b05dSBen Gardon  * only used for PML, and so will involve setting the dirty bit on each SPTE.
1274a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1275a6a0b05dSBen Gardon  */
1276a6a0b05dSBen Gardon static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1277a6a0b05dSBen Gardon 				gfn_t start, gfn_t end)
1278a6a0b05dSBen Gardon {
1279a6a0b05dSBen Gardon 	struct tdp_iter iter;
1280a6a0b05dSBen Gardon 	u64 new_spte;
1281a6a0b05dSBen Gardon 	bool spte_set = false;
1282a6a0b05dSBen Gardon 
12837cca2d0bSBen Gardon 	rcu_read_lock();
12847cca2d0bSBen Gardon 
1285a6a0b05dSBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
12861af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
12871af4a960SBen Gardon 			continue;
12881af4a960SBen Gardon 
12890f99ee2cSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
12900f99ee2cSBen Gardon 		    iter.old_spte & shadow_dirty_mask)
1291a6a0b05dSBen Gardon 			continue;
1292a6a0b05dSBen Gardon 
1293a6a0b05dSBen Gardon 		new_spte = iter.old_spte | shadow_dirty_mask;
1294a6a0b05dSBen Gardon 
1295a6a0b05dSBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
1296a6a0b05dSBen Gardon 		spte_set = true;
1297a6a0b05dSBen Gardon 	}
1298a6a0b05dSBen Gardon 
12997cca2d0bSBen Gardon 	rcu_read_unlock();
1300a6a0b05dSBen Gardon 	return spte_set;
1301a6a0b05dSBen Gardon }
1302a6a0b05dSBen Gardon 
1303a6a0b05dSBen Gardon /*
1304a6a0b05dSBen Gardon  * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
1305a6a0b05dSBen Gardon  * only used for PML, and so will involve setting the dirty bit on each SPTE.
1306a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1307a6a0b05dSBen Gardon  */
1308a6a0b05dSBen Gardon bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
1309a6a0b05dSBen Gardon {
1310a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1311a6a0b05dSBen Gardon 	int root_as_id;
1312a6a0b05dSBen Gardon 	bool spte_set = false;
1313a6a0b05dSBen Gardon 
1314a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
1315a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1316a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1317a6a0b05dSBen Gardon 			continue;
1318a6a0b05dSBen Gardon 
1319a6a0b05dSBen Gardon 		spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
1320a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1321a6a0b05dSBen Gardon 	}
1322a6a0b05dSBen Gardon 	return spte_set;
1323a6a0b05dSBen Gardon }
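
/*
 * Illustrative sketch of a caller (hypothetical): marking every mapped page
 * in a slot dirty so that, with PML, hardware need not log writes to pages
 * that are already considered dirty.  mmu_lock is assumed to be held for
 * write by the caller, which also owns the TLB flush if true is returned.
 */
static bool example_slot_set_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	return kvm_tdp_mmu_slot_set_dirty(kvm, slot);
}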
1324a6a0b05dSBen Gardon 
132514881998SBen Gardon /*
132687aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
132787aa9ec9SBen Gardon  * GFNs within the slot.
132814881998SBen Gardon  */
132914881998SBen Gardon static void zap_collapsible_spte_range(struct kvm *kvm,
133014881998SBen Gardon 				       struct kvm_mmu_page *root,
1331*9eba50f8SSean Christopherson 				       struct kvm_memory_slot *slot)
133214881998SBen Gardon {
1333*9eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
1334*9eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
133514881998SBen Gardon 	struct tdp_iter iter;
133614881998SBen Gardon 	kvm_pfn_t pfn;
133714881998SBen Gardon 	bool spte_set = false;
133814881998SBen Gardon 
13397cca2d0bSBen Gardon 	rcu_read_lock();
13407cca2d0bSBen Gardon 
134114881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
13421af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
13431af4a960SBen Gardon 			spte_set = false;
13441af4a960SBen Gardon 			continue;
13451af4a960SBen Gardon 		}
13461af4a960SBen Gardon 
134714881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
134887aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
134914881998SBen Gardon 			continue;
135014881998SBen Gardon 
135114881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
135214881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
1353*9eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
1354*9eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
135514881998SBen Gardon 			continue;
135614881998SBen Gardon 
135714881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
135814881998SBen Gardon 
13591af4a960SBen Gardon 		spte_set = true;
136014881998SBen Gardon 	}
136114881998SBen Gardon 
13627cca2d0bSBen Gardon 	rcu_read_unlock();
136314881998SBen Gardon 	if (spte_set)
136414881998SBen Gardon 		kvm_flush_remote_tlbs(kvm);
136514881998SBen Gardon }
136614881998SBen Gardon 
136714881998SBen Gardon /*
136814881998SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
136914881998SBen Gardon  * GFNs within the slot.
137014881998SBen Gardon  */
137114881998SBen Gardon void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1372*9eba50f8SSean Christopherson 				       struct kvm_memory_slot *slot)
137314881998SBen Gardon {
137414881998SBen Gardon 	struct kvm_mmu_page *root;
137514881998SBen Gardon 	int root_as_id;
137614881998SBen Gardon 
1377a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
137814881998SBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
137914881998SBen Gardon 		if (root_as_id != slot->as_id)
138014881998SBen Gardon 			continue;
138114881998SBen Gardon 
1382*9eba50f8SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
138314881998SBen Gardon 	}
138414881998SBen Gardon }
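
/*
 * Illustrative sketch of a caller (hypothetical): recovering huge mappings
 * for a slot after dirty logging has been disabled.  Unlike the
 * write-protect and dirty-clearing helpers above, the zap path issues its
 * own TLB flush, so the wrapper below only needs to take mmu_lock.
 */
static void example_recover_huge_mappings(struct kvm *kvm,
					  struct kvm_memory_slot *slot)
{
	write_lock(&kvm->mmu_lock);
	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
	write_unlock(&kvm->mmu_lock);
}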
138546044f72SBen Gardon 
138646044f72SBen Gardon /*
138746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
138846044f72SBen Gardon  * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
138946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
139046044f72SBen Gardon  */
139146044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
139246044f72SBen Gardon 			      gfn_t gfn)
139346044f72SBen Gardon {
139446044f72SBen Gardon 	struct tdp_iter iter;
139546044f72SBen Gardon 	u64 new_spte;
139646044f72SBen Gardon 	bool spte_set = false;
139746044f72SBen Gardon 
13987cca2d0bSBen Gardon 	rcu_read_lock();
13997cca2d0bSBen Gardon 
140046044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
140146044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
140246044f72SBen Gardon 			break;
140346044f72SBen Gardon 
140446044f72SBen Gardon 		new_spte = iter.old_spte &
140546044f72SBen Gardon 			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
140646044f72SBen Gardon 
140746044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
140846044f72SBen Gardon 		spte_set = true;
140946044f72SBen Gardon 	}
141046044f72SBen Gardon 
14117cca2d0bSBen Gardon 	rcu_read_unlock();
14127cca2d0bSBen Gardon 
141346044f72SBen Gardon 	return spte_set;
141446044f72SBen Gardon }
141546044f72SBen Gardon 
141646044f72SBen Gardon /*
141746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
141846044f72SBen Gardon  * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
141946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
142046044f72SBen Gardon  */
142146044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
142246044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
142346044f72SBen Gardon {
142446044f72SBen Gardon 	struct kvm_mmu_page *root;
142546044f72SBen Gardon 	int root_as_id;
142646044f72SBen Gardon 	bool spte_set = false;
142746044f72SBen Gardon 
1428531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
142946044f72SBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
143046044f72SBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
143146044f72SBen Gardon 		if (root_as_id != slot->as_id)
143246044f72SBen Gardon 			continue;
143346044f72SBen Gardon 
143446044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
143546044f72SBen Gardon 	}
143646044f72SBen Gardon 	return spte_set;
143746044f72SBen Gardon }
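
/*
 * Illustrative sketch of a caller (hypothetical): write-protecting a single
 * guest page so that future writes to it fault and can be intercepted.  Per
 * the lockdep assertion above, mmu_lock must be held for write; only the
 * one affected GFN is flushed.
 */
static void example_write_protect_one_gfn(struct kvm *kvm,
					  struct kvm_memory_slot *slot,
					  gfn_t gfn)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn))
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
}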
143846044f72SBen Gardon 
143995fb5b02SBen Gardon /*
144095fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
144195fb5b02SBen Gardon  * That SPTE may be non-present.
144295fb5b02SBen Gardon  */
144339b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
144439b4d43eSSean Christopherson 			 int *root_level)
144595fb5b02SBen Gardon {
144695fb5b02SBen Gardon 	struct tdp_iter iter;
144795fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
144895fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
14492aa07893SSean Christopherson 	int leaf = -1;
145095fb5b02SBen Gardon 
145139b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
145295fb5b02SBen Gardon 
14537cca2d0bSBen Gardon 	rcu_read_lock();
14547cca2d0bSBen Gardon 
145595fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
145695fb5b02SBen Gardon 		leaf = iter.level;
1457dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
145895fb5b02SBen Gardon 	}
145995fb5b02SBen Gardon 
14607cca2d0bSBen Gardon 	rcu_read_unlock();
14617cca2d0bSBen Gardon 
146295fb5b02SBen Gardon 	return leaf;
146395fb5b02SBen Gardon }
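
/*
 * Illustrative sketch of a caller (hypothetical name and debug print):
 * consuming the walk recorded by kvm_tdp_mmu_get_walk().  sptes[] is
 * indexed by level, so the valid entries run from *root_level down to the
 * returned leaf level; a return of -1 means no SPTE was recorded.
 */
static void example_dump_shadow_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	if (leaf == -1)
		return;

	for (level = root_level; level >= leaf; level--)
		pr_debug("level %d: spte = 0x%llx (%spresent)\n", level,
			 sptes[level],
			 is_shadow_present_pte(sptes[level]) ? "" : "not ");
}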
1464