xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 2bdb3d84)
// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush);

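/*
 * Free the page table memory and the struct kvm_mmu_page backing a TDP MMU
 * shadow page.
 */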
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

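/*
 * Drop a reference to a TDP MMU root. When the last reference is put, the
 * root is removed from the roots list, its paging structure is zapped and
 * the root page is freed. Callers must hold the MMU lock for write.
 */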
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	lockdep_assert_held_write(&kvm->mmu_lock);

	if (--root->root_count)
		return;

	WARN_ON(!root->tdp_mmu_page);

	list_del(&root->link);

	zap_gfn_range(kvm, root, 0, max_gfn, false, false);

	tdp_mmu_free_sp(root);
}

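/*
 * Returns false if the iteration over tdp_mmu_roots has reached the list
 * head; otherwise takes a reference on @root so the caller can safely use
 * it (and potentially yield) while iterating.
 */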
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	kvm_tdp_mmu_get_root(kvm, root);
	return true;
}

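/*
 * Advance to the next root in the list, dropping the reference that
 * tdp_mmu_next_root_valid() took on the current root.
 */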
static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	kvm_tdp_mmu_put_root(kvm, root);
	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))			\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

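/* Compute the page role for a direct-mapped TDP MMU page at @level. */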
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

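/*
 * Allocate a TDP MMU shadow page and its page table page from the vCPU's
 * memory caches and initialize its role and base gfn.
 */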
static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

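/*
 * Return the physical address of a TDP MMU root for the vCPU, reusing an
 * existing root with a matching role when possible and allocating a new
 * one otherwise.
 */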
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word) {
			kvm_tdp_mmu_get_root(kvm, root);
			goto out;
		}
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	root->root_count = 1;

	list_add(&root->link, &kvm->arch.tdp_mmu_roots);

out:
	return __pa(root->spt);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

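/*
 * If a present, accessed leaf SPTE is zapped or loses its accessed state
 * (or changes pfn), record the access on the backing pfn.
 */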
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

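/*
 * If a 4K SPTE gains write access (or becomes writable while mapping a new
 * pfn), mark the gfn dirty in its memslot's dirty bitmap.
 */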
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared, bool account_nx)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level - 1,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}


	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
 * associated bookkeeping
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			    new_spte, iter->level, true);

	return true;
}

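/*
 * Zap a SPTE while holding the MMU lock for read: freeze the SPTE with the
 * special removed value, flush the TLBs for the range it mapped, and only
 * then clear it. Returns false if the SPTE could not be frozen.
 */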
static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}


/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

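/*
 * Wrappers around __tdp_mmu_set_spte() that control whether accessed-state
 * and dirty-logging updates are recorded for the change.
 */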
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		cond_resched_rwlock_write(&kvm->mmu_lock);
		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.  Note, in some use cases a flush may be
 * required by prior actions.  Ensure the pending flush is performed prior to
 * yielding.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

	return flush;
}

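/*
 * Zap all SPTEs in every address space's TDP MMU roots, flushing the TLBs
 * if any SPTEs were cleared.
 */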
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					  int map_writable,
					  struct tdp_iter *iter,
					  kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = 0;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					 pfn, iter->old_spte, prefault, true,
					 map_writable, !shadow_accessed_mask,
					 &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
						    new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp, true,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);
	rcu_read_unlock();

	return ret;
}

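/*
 * Zap the gfn range described by the MMU notifier in every TDP MMU root of
 * the affected address space. Returns whether a TLB flush is needed before
 * dropping the MMU lock.
 */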
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
		flush |= zap_gfn_range(kvm, root, range->start, range->end,
				       range->may_block, flush);

	return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

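/*
 * Invoke @handler on each present leaf SPTE covering the notifier range in
 * every TDP MMU root of the affected address space, accumulating the
 * handlers' return values.
 */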
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return
 * non-zero
 * if any of the GFNs in the range have been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

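/* Report whether the SPTE mapping this gfn has been accessed. */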
static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

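/*
 * change_pte() handler for a single 4K SPTE: zap the existing SPTE and, if
 * the new host PTE is read-only, install a read-only SPTE for the new pfn.
 * Writable host PTEs are simply zapped and re-created by the next fault.
 */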
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See __handle_changed_spte().
	 */
	tdp_mmu_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);

	/* FIXME: return 'flush' instead of flushing here. */
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);

	return false;
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end).
1009a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1010a6a0b05dSBen Gardon  */
1011a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1012a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1013a6a0b05dSBen Gardon {
1014a6a0b05dSBen Gardon 	struct tdp_iter iter;
1015a6a0b05dSBen Gardon 	u64 new_spte;
1016a6a0b05dSBen Gardon 	bool spte_set = false;
1017a6a0b05dSBen Gardon 
10187cca2d0bSBen Gardon 	rcu_read_lock();
10197cca2d0bSBen Gardon 
1020a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1021a6a0b05dSBen Gardon 
1022a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1023a6a0b05dSBen Gardon 				   min_level, start, end) {
10241af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10251af4a960SBen Gardon 			continue;
10261af4a960SBen Gardon 
1027a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
10280f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
10290f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1030a6a0b05dSBen Gardon 			continue;
1031a6a0b05dSBen Gardon 
1032a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1033a6a0b05dSBen Gardon 
1034a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1035a6a0b05dSBen Gardon 		spte_set = true;
1036a6a0b05dSBen Gardon 	}
10377cca2d0bSBen Gardon 
10387cca2d0bSBen Gardon 	rcu_read_unlock();
1039a6a0b05dSBen Gardon 	return spte_set;
1040a6a0b05dSBen Gardon }
1041a6a0b05dSBen Gardon 
1042a6a0b05dSBen Gardon /*
1043a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1044a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1045a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1046a6a0b05dSBen Gardon  */
1047a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1048a6a0b05dSBen Gardon 			     int min_level)
1049a6a0b05dSBen Gardon {
1050a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1051a6a0b05dSBen Gardon 	bool spte_set = false;
1052a6a0b05dSBen Gardon 
1053a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1054a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1055a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1056a6a0b05dSBen Gardon 
1057a6a0b05dSBen Gardon 	return spte_set;
1058a6a0b05dSBen Gardon }
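
/*
 * Editor's note, illustrative sketch only (not part of this file): a caller
 * enabling dirty logging for a memslot might use this as below. min_level
 * selects the smallest mapping size that is write-protected: PG_LEVEL_4K
 * covers every leaf SPTE, while PG_LEVEL_2M would leave 4k mappings alone.
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */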
1059a6a0b05dSBen Gardon 
1060a6a0b05dSBen Gardon /*
1061a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1062a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1063a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1064a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1065a6a0b05dSBen Gardon  * be flushed.
1066a6a0b05dSBen Gardon  */
1067a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1068a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1069a6a0b05dSBen Gardon {
1070a6a0b05dSBen Gardon 	struct tdp_iter iter;
1071a6a0b05dSBen Gardon 	u64 new_spte;
1072a6a0b05dSBen Gardon 	bool spte_set = false;
1073a6a0b05dSBen Gardon 
10747cca2d0bSBen Gardon 	rcu_read_lock();
10757cca2d0bSBen Gardon 
1076a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
10771af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
10781af4a960SBen Gardon 			continue;
10791af4a960SBen Gardon 
1080a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1081a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1082a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1083a6a0b05dSBen Gardon 			else
1084a6a0b05dSBen Gardon 				continue;
1085a6a0b05dSBen Gardon 		} else {
1086a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1087a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1088a6a0b05dSBen Gardon 			else
1089a6a0b05dSBen Gardon 				continue;
1090a6a0b05dSBen Gardon 		}
1091a6a0b05dSBen Gardon 
1092a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1093a6a0b05dSBen Gardon 		spte_set = true;
1094a6a0b05dSBen Gardon 	}
10957cca2d0bSBen Gardon 
10967cca2d0bSBen Gardon 	rcu_read_unlock();
1097a6a0b05dSBen Gardon 	return spte_set;
1098a6a0b05dSBen Gardon }
1099a6a0b05dSBen Gardon 
1100a6a0b05dSBen Gardon /*
1101a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1102a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1103a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1104a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1105a6a0b05dSBen Gardon  * be flushed.
1106a6a0b05dSBen Gardon  */
1107a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1108a6a0b05dSBen Gardon {
1109a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1110a6a0b05dSBen Gardon 	bool spte_set = false;
1111a6a0b05dSBen Gardon 
1112a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1113a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1114a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1115a6a0b05dSBen Gardon 
1116a6a0b05dSBen Gardon 	return spte_set;
1117a6a0b05dSBen Gardon }
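
/*
 * Editor's note, illustrative sketch only (not part of this file): the
 * slot-wide dirty-clearing path typically ORs this result with the legacy
 * MMU's before deciding on a single TLB flush. The legacy helper name below
 * is a placeholder used for illustration.
 *
 *	bool flush = legacy_mmu_clear_dirty_slot(kvm, slot);
 *
 *	if (kvm->arch.tdp_mmu_enabled)
 *		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, slot);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 */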
1118a6a0b05dSBen Gardon 
1119a6a0b05dSBen Gardon /*
1120a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1121a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1122a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1123a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1124a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1125a6a0b05dSBen Gardon  */
1126a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1127a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1128a6a0b05dSBen Gardon {
1129a6a0b05dSBen Gardon 	struct tdp_iter iter;
1130a6a0b05dSBen Gardon 	u64 new_spte;
1131a6a0b05dSBen Gardon 
11327cca2d0bSBen Gardon 	rcu_read_lock();
11337cca2d0bSBen Gardon 
1134a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1135a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1136a6a0b05dSBen Gardon 		if (!mask)
1137a6a0b05dSBen Gardon 			break;
1138a6a0b05dSBen Gardon 
1139a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1140a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1141a6a0b05dSBen Gardon 			continue;
1142a6a0b05dSBen Gardon 
1143f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1144f1b3b06aSBen Gardon 
1145a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1146a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1147a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1148a6a0b05dSBen Gardon 			else
1149a6a0b05dSBen Gardon 				continue;
1150a6a0b05dSBen Gardon 		} else {
1151a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1152a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1153a6a0b05dSBen Gardon 			else
1154a6a0b05dSBen Gardon 				continue;
1155a6a0b05dSBen Gardon 		}
1156a6a0b05dSBen Gardon 
1157a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1158a6a0b05dSBen Gardon 	}
11597cca2d0bSBen Gardon 
11607cca2d0bSBen Gardon 	rcu_read_unlock();
1161a6a0b05dSBen Gardon }
1162a6a0b05dSBen Gardon 
1163a6a0b05dSBen Gardon /*
1164a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1165a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1166a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1167a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1168a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1169a6a0b05dSBen Gardon  */
1170a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1171a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1172a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1173a6a0b05dSBen Gardon 				       bool wrprot)
1174a6a0b05dSBen Gardon {
1175a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1176a6a0b05dSBen Gardon 
1177531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1178a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1179a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1180a6a0b05dSBen Gardon }
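
/*
 * Editor's note, worked example (not part of this file): bit i of mask
 * corresponds to gfn + i. With gfn == 0x1000 and mask == 0x5, the walk
 * starts at gfn + __ffs(mask) == 0x1000 and clears the dirty (or writable)
 * state only for the 4k SPTEs mapping GFNs 0x1000 and 0x1002; the loop
 * exits as soon as every set bit in mask has been consumed.
 */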
1181a6a0b05dSBen Gardon 
1182a6a0b05dSBen Gardon /*
118387aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
118487aa9ec9SBen Gardon  * GFNs within the slot.
118514881998SBen Gardon  */
1186af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm,
118714881998SBen Gardon 				       struct kvm_mmu_page *root,
11888ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
1189af95b53eSSean Christopherson 				       bool flush)
119014881998SBen Gardon {
11919eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
11929eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
119314881998SBen Gardon 	struct tdp_iter iter;
119414881998SBen Gardon 	kvm_pfn_t pfn;
119514881998SBen Gardon 
11967cca2d0bSBen Gardon 	rcu_read_lock();
11977cca2d0bSBen Gardon 
119814881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
1199af95b53eSSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
1200af95b53eSSean Christopherson 			flush = false;
12011af4a960SBen Gardon 			continue;
12021af4a960SBen Gardon 		}
12031af4a960SBen Gardon 
120414881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
120587aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
120614881998SBen Gardon 			continue;
120714881998SBen Gardon 
120814881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
120914881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
12109eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
12119eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
121214881998SBen Gardon 			continue;
121314881998SBen Gardon 
121414881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
121514881998SBen Gardon 
1216af95b53eSSean Christopherson 		flush = true;
121714881998SBen Gardon 	}
121814881998SBen Gardon 
12197cca2d0bSBen Gardon 	rcu_read_unlock();
1220af95b53eSSean Christopherson 
1221af95b53eSSean Christopherson 	return flush;
122214881998SBen Gardon }
122314881998SBen Gardon 
122414881998SBen Gardon /*
122514881998SBen Gardon  * Clear the leaf entries which could be replaced by large mappings, for
122614881998SBen Gardon  * GFNs within the slot, so that large mappings can be recreated on fault.
122714881998SBen Gardon  */
1228142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
12298ca6f063SBen Gardon 				       const struct kvm_memory_slot *slot,
12308ca6f063SBen Gardon 				       bool flush)
123114881998SBen Gardon {
123214881998SBen Gardon 	struct kvm_mmu_page *root;
123314881998SBen Gardon 
1234a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1235af95b53eSSean Christopherson 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1236af95b53eSSean Christopherson 
1237142ccde1SSean Christopherson 	return flush;
123814881998SBen Gardon }
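
/*
 * Editor's note, illustrative sketch only (not part of this file): this is
 * intended for the path that runs after dirty logging is disabled, so the
 * 4k mappings created for logging can be rebuilt as large mappings on the
 * next fault. The legacy helper name below is a placeholder.
 *
 *	bool flush;
 *
 *	write_lock(&kvm->mmu_lock);
 *	flush = legacy_mmu_zap_collapsible_sptes(kvm, slot);
 *	if (kvm->arch.tdp_mmu_enabled)
 *		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */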
123946044f72SBen Gardon 
124046044f72SBen Gardon /*
124146044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12425fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
124346044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
124446044f72SBen Gardon  */
124546044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
124646044f72SBen Gardon 			      gfn_t gfn)
124746044f72SBen Gardon {
124846044f72SBen Gardon 	struct tdp_iter iter;
124946044f72SBen Gardon 	u64 new_spte;
125046044f72SBen Gardon 	bool spte_set = false;
125146044f72SBen Gardon 
12527cca2d0bSBen Gardon 	rcu_read_lock();
12537cca2d0bSBen Gardon 
125446044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
125546044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
125646044f72SBen Gardon 			break;
125746044f72SBen Gardon 
125846044f72SBen Gardon 		new_spte = iter.old_spte &
12595fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
126046044f72SBen Gardon 
126146044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
126246044f72SBen Gardon 		spte_set = true;
126346044f72SBen Gardon 	}
126446044f72SBen Gardon 
12657cca2d0bSBen Gardon 	rcu_read_unlock();
12667cca2d0bSBen Gardon 
126746044f72SBen Gardon 	return spte_set;
126846044f72SBen Gardon }
126946044f72SBen Gardon 
127046044f72SBen Gardon /*
127146044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
12725fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
127346044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
127446044f72SBen Gardon  */
127546044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
127646044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
127746044f72SBen Gardon {
127846044f72SBen Gardon 	struct kvm_mmu_page *root;
127946044f72SBen Gardon 	bool spte_set = false;
128046044f72SBen Gardon 
1281531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1282a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
128346044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
1284a3f15bdaSSean Christopherson 
128546044f72SBen Gardon 	return spte_set;
128646044f72SBen Gardon }
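
/*
 * Editor's note, clarifying contrast (not part of this file): the
 * range-based path above clears only the writable bit, whereas this path
 * also clears the MMU-writable bit:
 *
 *	wrprot_gfn_range():	new_spte = old_spte & ~PT_WRITABLE_MASK;
 *	write_protect_gfn():	new_spte = old_spte &
 *					~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
 *
 * The inference (not stated in this file) is that clearing the MMU-writable
 * bit keeps later faults from simply restoring write access, so writes to
 * this GFN keep being intercepted until it is un-protected.
 */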
128746044f72SBen Gardon 
128895fb5b02SBen Gardon /*
128995fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
129095fb5b02SBen Gardon  * That SPTE may be non-present.
129195fb5b02SBen Gardon  */
129239b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
129339b4d43eSSean Christopherson 			 int *root_level)
129495fb5b02SBen Gardon {
129595fb5b02SBen Gardon 	struct tdp_iter iter;
129695fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
129795fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
12982aa07893SSean Christopherson 	int leaf = -1;
129995fb5b02SBen Gardon 
130039b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
130195fb5b02SBen Gardon 
13027cca2d0bSBen Gardon 	rcu_read_lock();
13037cca2d0bSBen Gardon 
130495fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
130595fb5b02SBen Gardon 		leaf = iter.level;
1306dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
130795fb5b02SBen Gardon 	}
130895fb5b02SBen Gardon 
13097cca2d0bSBen Gardon 	rcu_read_unlock();
13107cca2d0bSBen Gardon 
131195fb5b02SBen Gardon 	return leaf;
131295fb5b02SBen Gardon }
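
/*
 * Editor's note, illustrative sketch only (not part of this file): a caller
 * consumes the returned array from *root_level down to the returned leaf
 * level; a negative return means no SPTEs were walked. For example:
 *
 *	static void dump_tdp_walk(struct kvm_vcpu *vcpu, u64 addr)
 *	{
 *		u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *		int root, leaf, level;
 *
 *		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 *		if (leaf < 0)
 *			return;
 *		for (level = root; level >= leaf; level--)
 *			pr_info("level %d: spte 0x%llx\n", level, sptes[level]);
 *	}
 */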
1313