xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 77c8cd6b)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
1371ba3f31SPaolo Bonzini static bool __read_mostly tdp_mmu_enabled = true;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
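/*
 * A usage note (a sketch, not from the upstream file): because the knob is
 * registered with 0644 permissions above and this file is built into kvm.ko,
 * it is exposed as /sys/module/kvm/parameters/tdp_mmu and can also be set at
 * load time, e.g. "modprobe kvm tdp_mmu=N".  The per-VM setting is latched
 * once in kvm_mmu_init_tdp_mmu() below and never changes for the VM's
 * lifetime.
 */
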
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17d501f747SBen Gardon bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20d501f747SBen Gardon 		return false;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28d501f747SBen Gardon 
29d501f747SBen Gardon 	return true;
30fe5db27dSBen Gardon }
31fe5db27dSBen Gardon 
32226b8c8fSSean Christopherson /* Arbitrarily returns true so that this may be used in if statements. */
33226b8c8fSSean Christopherson static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
346103bc07SBen Gardon 							     bool shared)
356103bc07SBen Gardon {
366103bc07SBen Gardon 	if (shared)
376103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
386103bc07SBen Gardon 	else
396103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
40226b8c8fSSean Christopherson 
41226b8c8fSSean Christopherson 	return true;
426103bc07SBen Gardon }
436103bc07SBen Gardon 
44fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
45fe5db27dSBen Gardon {
46fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
47fe5db27dSBen Gardon 		return;
4802c00b3aSBen Gardon 
49524a1e4eSSean Christopherson 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
5002c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
517cca2d0bSBen Gardon 
527cca2d0bSBen Gardon 	/*
537cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
547cca2d0bSBen Gardon 	 * can run before the VM is torn down.
557cca2d0bSBen Gardon 	 */
567cca2d0bSBen Gardon 	rcu_barrier();
5702c00b3aSBen Gardon }
5802c00b3aSBen Gardon 
592bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
606103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
616103bc07SBen Gardon 			  bool shared);
622bdb3d84SBen Gardon 
632bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
64a889ea54SBen Gardon {
652bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
662bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
67a889ea54SBen Gardon }
68a889ea54SBen Gardon 
69c0e64238SBen Gardon /*
70c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
71c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
72c0e64238SBen Gardon  * the memory.
73c0e64238SBen Gardon  * Because TDP MMU page table memory is only accessed in RCU read-side
74c0e64238SBen Gardon  * critical sections, and is only freed after an RCU grace period has
75c0e64238SBen Gardon  * elapsed, lockless walkers will never use the memory after it is freed.
76c0e64238SBen Gardon  */
77c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
78a889ea54SBen Gardon {
79c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
80c0e64238SBen Gardon 					       rcu_head);
81a889ea54SBen Gardon 
82c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
83a889ea54SBen Gardon }
84a889ea54SBen Gardon 
856103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
866103bc07SBen Gardon 			  bool shared)
872bdb3d84SBen Gardon {
886103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
892bdb3d84SBen Gardon 
9011cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
912bdb3d84SBen Gardon 		return;
922bdb3d84SBen Gardon 
932bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
942bdb3d84SBen Gardon 
95c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
96c0e64238SBen Gardon 	list_del_rcu(&root->link);
97c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
982bdb3d84SBen Gardon 
99db01416bSSean Christopherson 	/*
100db01416bSSean Christopherson 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
101db01416bSSean Christopherson 	 * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
102db01416bSSean Christopherson 	 * allocating a new root (see kvm_mmu_load()), and when migrating a vCPU
103db01416bSSean Christopherson 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
104db01416bSSean Christopherson 	 * intermediate paging structures, that may be zapped, as such entries
105db01416bSSean Christopherson 	 * are associated with the ASID on both VMX and SVM.
106db01416bSSean Christopherson 	 */
107db01416bSSean Christopherson 	(void)zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
1082bdb3d84SBen Gardon 
109c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
110a889ea54SBen Gardon }
111a889ea54SBen Gardon 
112cfc10997SBen Gardon /*
113d62007edSSean Christopherson  * Returns the next root after @prev_root (or the first root if @prev_root is
114d62007edSSean Christopherson  * NULL).  A reference to the returned root is acquired, and the reference to
115d62007edSSean Christopherson  * @prev_root is released (the caller obviously must hold a reference to
116d62007edSSean Christopherson  * @prev_root if it's non-NULL).
117d62007edSSean Christopherson  *
118d62007edSSean Christopherson  * If @only_valid is true, invalid roots are skipped.
119d62007edSSean Christopherson  *
120d62007edSSean Christopherson  * Returns NULL if the end of tdp_mmu_roots was reached.
121cfc10997SBen Gardon  */
122cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
1236103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
124d62007edSSean Christopherson 					      bool shared, bool only_valid)
125a889ea54SBen Gardon {
126a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
127a889ea54SBen Gardon 
128c0e64238SBen Gardon 	rcu_read_lock();
129c0e64238SBen Gardon 
130cfc10997SBen Gardon 	if (prev_root)
131c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132c0e64238SBen Gardon 						  &prev_root->link,
133c0e64238SBen Gardon 						  typeof(*prev_root), link);
134cfc10997SBen Gardon 	else
135c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
136cfc10997SBen Gardon 						   typeof(*next_root), link);
137cfc10997SBen Gardon 
13804dc4e6cSSean Christopherson 	while (next_root) {
139d62007edSSean Christopherson 		if ((!only_valid || !next_root->role.invalid) &&
140ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(next_root))
14104dc4e6cSSean Christopherson 			break;
14204dc4e6cSSean Christopherson 
143c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
144c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
14504dc4e6cSSean Christopherson 	}
146fb101293SBen Gardon 
147c0e64238SBen Gardon 	rcu_read_unlock();
148cfc10997SBen Gardon 
149cfc10997SBen Gardon 	if (prev_root)
1506103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
151cfc10997SBen Gardon 
152a889ea54SBen Gardon 	return next_root;
153a889ea54SBen Gardon }
154a889ea54SBen Gardon 
155a889ea54SBen Gardon /*
156a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
157a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
158a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
159a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
1606103bc07SBen Gardon  *
1616103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
1626103bc07SBen Gardon  * mode.  If this thread ends up putting the last reference to a root, the
1636103bc07SBen Gardon  * root is zapped without dropping the lock and its memory is freed via RCU.
164a889ea54SBen Gardon  */
165d62007edSSean Christopherson #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
166d62007edSSean Christopherson 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
167cfc10997SBen Gardon 	     _root;								\
168d62007edSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
169614f6970SPaolo Bonzini 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
170614f6970SPaolo Bonzini 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
171a3f15bdaSSean Christopherson 		} else
172a889ea54SBen Gardon 
173d62007edSSean Christopherson #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
174d62007edSSean Christopherson 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
175d62007edSSean Christopherson 
176614f6970SPaolo Bonzini #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
177614f6970SPaolo Bonzini 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
178d62007edSSean Christopherson 
179226b8c8fSSean Christopherson /*
180226b8c8fSSean Christopherson  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
181226b8c8fSSean Christopherson  * the implication being that any flow that holds mmu_lock for read is
182226b8c8fSSean Christopherson  * inherently yield-friendly and should use the yield-safe variant above.
183226b8c8fSSean Christopherson  * Holding mmu_lock for write obviates the need for RCU protection as the list
184226b8c8fSSean Christopherson  * is guaranteed to be stable.
185226b8c8fSSean Christopherson  */
186a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
187226b8c8fSSean Christopherson 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
188226b8c8fSSean Christopherson 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
189226b8c8fSSean Christopherson 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
190a3f15bdaSSean Christopherson 		} else
19102c00b3aSBen Gardon 
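/*
 * A minimal usage sketch for the iterator macros above (mirroring
 * __kvm_tdp_mmu_zap_gfn_range() later in this file, shown here only for
 * illustration): with mmu_lock held, the caller simply loops and lets the
 * macro take and drop root references and filter by address space:
 *
 *	struct kvm_mmu_page *root;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush, false);
 *
 * Exiting such a loop early requires an explicit kvm_tdp_mmu_put_root() on
 * the current root, per the comment above the yield-safe variants.
 */
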
192a82070b6SDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
19302c00b3aSBen Gardon {
19402c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
19502c00b3aSBen Gardon 
19602c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
19702c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
198a82070b6SDavid Matlack 
199a82070b6SDavid Matlack 	return sp;
200a82070b6SDavid Matlack }
201a82070b6SDavid Matlack 
202c10743a1SSean Christopherson static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
203c10743a1SSean Christopherson 			    gfn_t gfn, union kvm_mmu_page_role role)
204a82070b6SDavid Matlack {
20502c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
20602c00b3aSBen Gardon 
207a3aca4deSDavid Matlack 	sp->role = role;
20802c00b3aSBen Gardon 	sp->gfn = gfn;
209c10743a1SSean Christopherson 	sp->ptep = sptep;
21002c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
21102c00b3aSBen Gardon 
21233dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
21302c00b3aSBen Gardon }
21402c00b3aSBen Gardon 
215a82070b6SDavid Matlack static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
216a3aca4deSDavid Matlack 				  struct tdp_iter *iter)
217a3aca4deSDavid Matlack {
218a3aca4deSDavid Matlack 	struct kvm_mmu_page *parent_sp;
219a3aca4deSDavid Matlack 	union kvm_mmu_page_role role;
220a3aca4deSDavid Matlack 
221a3aca4deSDavid Matlack 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
222a3aca4deSDavid Matlack 
223a3aca4deSDavid Matlack 	role = parent_sp->role;
224a3aca4deSDavid Matlack 	role.level--;
225a3aca4deSDavid Matlack 
226c10743a1SSean Christopherson 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
227a3aca4deSDavid Matlack }
228a3aca4deSDavid Matlack 
2296e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
23002c00b3aSBen Gardon {
231a3aca4deSDavid Matlack 	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
23202c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
23302c00b3aSBen Gardon 	struct kvm_mmu_page *root;
23402c00b3aSBen Gardon 
2356e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
23602c00b3aSBen Gardon 
23704dc4e6cSSean Christopherson 	/*
23804dc4e6cSSean Christopherson 	 * Check for an existing root before allocating a new one.  Note, the
23904dc4e6cSSean Christopherson 	 * role check prevents consuming an invalid root.
24004dc4e6cSSean Christopherson 	 */
241a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
242fb101293SBen Gardon 		if (root->role.word == role.word &&
243ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(root))
2446e6ec584SSean Christopherson 			goto out;
24502c00b3aSBen Gardon 	}
24602c00b3aSBen Gardon 
247a82070b6SDavid Matlack 	root = tdp_mmu_alloc_sp(vcpu);
248c10743a1SSean Christopherson 	tdp_mmu_init_sp(root, NULL, 0, role);
249a82070b6SDavid Matlack 
25011cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
25102c00b3aSBen Gardon 
252c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
253c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
254c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
25502c00b3aSBen Gardon 
2566e6ec584SSean Christopherson out:
25702c00b3aSBen Gardon 	return __pa(root->spt);
258fe5db27dSBen Gardon }
2592f2fad08SBen Gardon 
2602f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2619a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2629a77daacSBen Gardon 				bool shared);
2632f2fad08SBen Gardon 
264f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
265f8e14497SBen Gardon {
266f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
267f8e14497SBen Gardon 		return;
268f8e14497SBen Gardon 
269f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
27064bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
27164bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
272f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
273f8e14497SBen Gardon }
274f8e14497SBen Gardon 
275a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
276a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
277a6a0b05dSBen Gardon {
278a6a0b05dSBen Gardon 	bool pfn_changed;
279a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
280a6a0b05dSBen Gardon 
281a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
282a6a0b05dSBen Gardon 		return;
283a6a0b05dSBen Gardon 
284a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
285a6a0b05dSBen Gardon 
286a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
287a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
288a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
289fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
290a6a0b05dSBen Gardon 	}
291a6a0b05dSBen Gardon }
292a6a0b05dSBen Gardon 
2932f2fad08SBen Gardon /**
294c298a30cSDavid Matlack  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
295a9442f59SBen Gardon  *
296a9442f59SBen Gardon  * @kvm: kvm instance
297a9442f59SBen Gardon  * @sp: the page to be removed
2989a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2999a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3009a77daacSBen Gardon  *	    threads that might be adding or removing pages.
301a9442f59SBen Gardon  */
302c298a30cSDavid Matlack static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
3039a77daacSBen Gardon 			      bool shared)
304a9442f59SBen Gardon {
3059a77daacSBen Gardon 	if (shared)
3069a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
3079a77daacSBen Gardon 	else
308a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
309a9442f59SBen Gardon 
310a9442f59SBen Gardon 	list_del(&sp->link);
311a9442f59SBen Gardon 	if (sp->lpage_disallowed)
312a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
3139a77daacSBen Gardon 
3149a77daacSBen Gardon 	if (shared)
3159a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
316a9442f59SBen Gardon }
317a9442f59SBen Gardon 
318a9442f59SBen Gardon /**
3190f53dfa3SDavid Matlack  * handle_removed_pt() - handle a page table removed from the TDP structure
320a066e61fSBen Gardon  *
321a066e61fSBen Gardon  * @kvm: kvm instance
322a066e61fSBen Gardon  * @pt: the page removed from the paging structure
3239a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
3249a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
3259a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
326a066e61fSBen Gardon  *
327a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
328a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
32970fb3e41SBen Gardon  *
33070fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
33170fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
33270fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
33370fb3e41SBen Gardon  * early rcu_dereferences in the function.
334a066e61fSBen Gardon  */
3350f53dfa3SDavid Matlack static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
336a066e61fSBen Gardon {
33770fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
338a066e61fSBen Gardon 	int level = sp->role.level;
339e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
340a066e61fSBen Gardon 	int i;
341a066e61fSBen Gardon 
342a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
343a066e61fSBen Gardon 
344c298a30cSDavid Matlack 	tdp_mmu_unlink_sp(kvm, sp, shared);
345a066e61fSBen Gardon 
346a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
347574c3c55SBen Gardon 		u64 *sptep = rcu_dereference(pt) + i;
348574c3c55SBen Gardon 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
349574c3c55SBen Gardon 		u64 old_child_spte;
3509a77daacSBen Gardon 
3519a77daacSBen Gardon 		if (shared) {
352e25f0e0cSBen Gardon 			/*
353e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
354e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
355e25f0e0cSBen Gardon 			 * already marked as removed then another thread
356e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
357e25f0e0cSBen Gardon 			 * keep retrying the exchange until the SPTE changes
358e25f0e0cSBen Gardon 			 * from some other value to the removed SPTE value.
359e25f0e0cSBen Gardon 			 */
360e25f0e0cSBen Gardon 			for (;;) {
361e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
362e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
363e25f0e0cSBen Gardon 					break;
364e25f0e0cSBen Gardon 				cpu_relax();
365e25f0e0cSBen Gardon 			}
3669a77daacSBen Gardon 		} else {
3678df9f1afSSean Christopherson 			/*
3688df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3698df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3708df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3718df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3728df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3738df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3748df9f1afSSean Christopherson 			 * unreachable.
3758df9f1afSSean Christopherson 			 */
3769a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3778df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3788df9f1afSSean Christopherson 				continue;
379e25f0e0cSBen Gardon 
380e25f0e0cSBen Gardon 			/*
381e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
382e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
383e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
384e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
385e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
386e25f0e0cSBen Gardon 			 * the function.
387e25f0e0cSBen Gardon 			 */
388e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3899a77daacSBen Gardon 		}
390e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
391f1b83255SKai Huang 				    old_child_spte, REMOVED_SPTE, level,
392e25f0e0cSBen Gardon 				    shared);
393a066e61fSBen Gardon 	}
394a066e61fSBen Gardon 
395574c3c55SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
396f1b83255SKai Huang 					   KVM_PAGES_PER_HPAGE(level + 1));
397a066e61fSBen Gardon 
3987cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
399a066e61fSBen Gardon }
400a066e61fSBen Gardon 
401a066e61fSBen Gardon /**
4027f6231a3SKai Huang  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
4032f2fad08SBen Gardon  * @kvm: kvm instance
4042f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
4052f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
4062f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
4072f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
4082f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
4099a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
4109a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
4119a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
4122f2fad08SBen Gardon  *
4132f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
4142f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
4152f2fad08SBen Gardon  */
4162f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4179a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
4189a77daacSBen Gardon 				  bool shared)
4192f2fad08SBen Gardon {
4202f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
4212f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
4222f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
4232f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
4242f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4252f2fad08SBen Gardon 
4262f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4272f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
428764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4292f2fad08SBen Gardon 
4302f2fad08SBen Gardon 	/*
4312f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4322f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4332f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
434d9f6e12fSIngo Molnar 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4352f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4362f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4372f2fad08SBen Gardon 	 * thread before replacement.
4382f2fad08SBen Gardon 	 */
4392f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4402f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4412f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4422f2fad08SBen Gardon 		       "different PFN!\n"
4432f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4442f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4452f2fad08SBen Gardon 
4462f2fad08SBen Gardon 		/*
4472f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
448d9f6e12fSIngo Molnar 		 * corruption.
4492f2fad08SBen Gardon 		 */
4502f2fad08SBen Gardon 		BUG();
4512f2fad08SBen Gardon 	}
4522f2fad08SBen Gardon 
4532f2fad08SBen Gardon 	if (old_spte == new_spte)
4542f2fad08SBen Gardon 		return;
4552f2fad08SBen Gardon 
456b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
457b9a98c34SBen Gardon 
458115111efSDavid Matlack 	if (is_leaf)
459115111efSDavid Matlack 		check_spte_writable_invariants(new_spte);
460115111efSDavid Matlack 
4612f2fad08SBen Gardon 	/*
4622f2fad08SBen Gardon 	 * The only time a SPTE should be changed from a non-present to
4632f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4642f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4652f2fad08SBen Gardon 	 */
4662f2fad08SBen Gardon 	if (!was_present && !is_present) {
4672f2fad08SBen Gardon 		/*
46808f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
46908f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
47008f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
47108f07c80SBen Gardon 		 * are nonpresent.
4722f2fad08SBen Gardon 		 */
47308f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
47408f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
47508f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4762f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4772f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4782f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
47908f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
48008f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4812f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4822f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4832f2fad08SBen Gardon 		return;
4842f2fad08SBen Gardon 	}
4852f2fad08SBen Gardon 
48671f51d2cSMingwei Zhang 	if (is_leaf != was_leaf)
48771f51d2cSMingwei Zhang 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
4882f2fad08SBen Gardon 
4892f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
49064bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4912f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4922f2fad08SBen Gardon 
4932f2fad08SBen Gardon 	/*
4942f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
495c8e5a0d0SSean Christopherson 	 * the paging structure.  Note the WARN on the PFN changing without the
496c8e5a0d0SSean Christopherson 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
497c8e5a0d0SSean Christopherson 	 * pages are kernel allocations and should never be migrated.
4982f2fad08SBen Gardon 	 */
499c8e5a0d0SSean Christopherson 	if (was_present && !was_leaf &&
500c8e5a0d0SSean Christopherson 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
5010f53dfa3SDavid Matlack 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
5022f2fad08SBen Gardon }
5032f2fad08SBen Gardon 
5042f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
5059a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
5069a77daacSBen Gardon 				bool shared)
5072f2fad08SBen Gardon {
5089a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
5099a77daacSBen Gardon 			      shared);
510f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
511a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
512a6a0b05dSBen Gardon 				      new_spte, level);
5132f2fad08SBen Gardon }
514faaf05b0SBen Gardon 
515fe43fa2fSBen Gardon /*
5166ccf4438SPaolo Bonzini  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
5176ccf4438SPaolo Bonzini  * and handle the associated bookkeeping.  Do not mark the page dirty
51824ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
5199a77daacSBen Gardon  *
5203255530aSDavid Matlack  * If setting the SPTE fails because it has changed, iter->old_spte will be
5213255530aSDavid Matlack  * refreshed to the current value of the spte.
5223255530aSDavid Matlack  *
5239a77daacSBen Gardon  * @kvm: kvm instance
5249a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
5259a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
5263e72c791SDavid Matlack  * Return:
5273e72c791SDavid Matlack  * * 0      - If the SPTE was set.
5283e72c791SDavid Matlack  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
5293e72c791SDavid Matlack  *            no side-effects other than setting iter->old_spte to the last
5303e72c791SDavid Matlack  *            known value of the spte.
5319a77daacSBen Gardon  */
5323e72c791SDavid Matlack static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
5339a77daacSBen Gardon 					  struct tdp_iter *iter,
5349a77daacSBen Gardon 					  u64 new_spte)
5359a77daacSBen Gardon {
5363255530aSDavid Matlack 	u64 *sptep = rcu_dereference(iter->sptep);
5373255530aSDavid Matlack 	u64 old_spte;
5383255530aSDavid Matlack 
5393a0f64deSSean Christopherson 	WARN_ON_ONCE(iter->yielded);
5403a0f64deSSean Christopherson 
5419a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
5429a77daacSBen Gardon 
54308f07c80SBen Gardon 	/*
54408f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
54508f07c80SBen Gardon 	 * may modify it.
54608f07c80SBen Gardon 	 */
5477a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
5483e72c791SDavid Matlack 		return -EBUSY;
54908f07c80SBen Gardon 
5506e8eb206SDavid Matlack 	/*
5516e8eb206SDavid Matlack 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
5526e8eb206SDavid Matlack 	 * does not hold the mmu_lock.
5536e8eb206SDavid Matlack 	 */
5543255530aSDavid Matlack 	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
5553255530aSDavid Matlack 	if (old_spte != iter->old_spte) {
5563255530aSDavid Matlack 		/*
5573255530aSDavid Matlack 		 * The page table entry was modified by a different logical
5583255530aSDavid Matlack 		 * CPU. Refresh iter->old_spte with the current value so the
5593255530aSDavid Matlack 		 * caller operates on fresh data, e.g. if it retries
5603255530aSDavid Matlack 		 * tdp_mmu_set_spte_atomic().
5613255530aSDavid Matlack 		 */
5623255530aSDavid Matlack 		iter->old_spte = old_spte;
5633e72c791SDavid Matlack 		return -EBUSY;
5643255530aSDavid Matlack 	}
5659a77daacSBen Gardon 
56624ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
56708889894SSean Christopherson 			      new_spte, iter->level, true);
56824ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
5699a77daacSBen Gardon 
5703e72c791SDavid Matlack 	return 0;
5719a77daacSBen Gardon }
5729a77daacSBen Gardon 
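/*
 * A minimal sketch of the expected calling pattern (mirroring the retry
 * pattern used by callers later in this file): on -EBUSY the iterator's
 * old_spte has already been refreshed, so the caller just recomputes its
 * new SPTE value, if necessary, and tries again:
 *
 *	retry:
 *		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *			goto retry;
 */
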
5733e72c791SDavid Matlack static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
57408f07c80SBen Gardon 					  struct tdp_iter *iter)
57508f07c80SBen Gardon {
5763e72c791SDavid Matlack 	int ret;
5773e72c791SDavid Matlack 
57808f07c80SBen Gardon 	/*
57908f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
58008f07c80SBen Gardon 	 * non-present value. This will stop other threads from
58108f07c80SBen Gardon 	 * immediately installing a present entry in its place
58208f07c80SBen Gardon 	 * before the TLBs are flushed.
58308f07c80SBen Gardon 	 */
5843e72c791SDavid Matlack 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
5853e72c791SDavid Matlack 	if (ret)
5863e72c791SDavid Matlack 		return ret;
58708f07c80SBen Gardon 
58808f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
58908f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
59008f07c80SBen Gardon 
59108f07c80SBen Gardon 	/*
59208f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
59308f07c80SBen Gardon 	 * must either wait on the MMU lock or use
594d9f6e12fSIngo Molnar 	 * tdp_mmu_set_spte_atomic which will not overwrite the
59508f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
59608f07c80SBen Gardon 	 * here since the SPTE is going from non-present
59708f07c80SBen Gardon 	 * to non-present.
59808f07c80SBen Gardon 	 */
5990e587aa7SSean Christopherson 	kvm_tdp_mmu_write_spte(iter->sptep, 0);
60008f07c80SBen Gardon 
6013e72c791SDavid Matlack 	return 0;
60208f07c80SBen Gardon }
60308f07c80SBen Gardon 
6049a77daacSBen Gardon 
6059a77daacSBen Gardon /*
606fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
607626808d1SSean Christopherson  * @kvm:	      KVM instance
608626808d1SSean Christopherson  * @as_id:	      Address space ID, i.e. regular vs. SMM
609626808d1SSean Christopherson  * @sptep:	      Pointer to the SPTE
610626808d1SSean Christopherson  * @old_spte:	      The current value of the SPTE
611626808d1SSean Christopherson  * @new_spte:	      The new value that will be set for the SPTE
612626808d1SSean Christopherson  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
613626808d1SSean Christopherson  * @level:	      The level _containing_ the SPTE (its parent PT's level)
614fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
615fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
616fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
617fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
618fe43fa2fSBen Gardon  *		      double counted.
619fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
620fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
621fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
622fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
623fe43fa2fSBen Gardon  *		      writes from being double counted.
624fe43fa2fSBen Gardon  */
625626808d1SSean Christopherson static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
626626808d1SSean Christopherson 			       u64 old_spte, u64 new_spte, gfn_t gfn, int level,
627626808d1SSean Christopherson 			       bool record_acc_track, bool record_dirty_log)
628faaf05b0SBen Gardon {
629531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
6303a9a4aa5SBen Gardon 
63108f07c80SBen Gardon 	/*
632966da62aSSean Christopherson 	 * No thread should be using this function to set SPTEs to or from the
63308f07c80SBen Gardon 	 * temporary removed SPTE value.
63408f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
63508f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
63608f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
63708f07c80SBen Gardon 	 */
638626808d1SSean Christopherson 	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
63908f07c80SBen Gardon 
640626808d1SSean Christopherson 	kvm_tdp_mmu_write_spte(sptep, new_spte);
641faaf05b0SBen Gardon 
642626808d1SSean Christopherson 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
643626808d1SSean Christopherson 
644f8e14497SBen Gardon 	if (record_acc_track)
645626808d1SSean Christopherson 		handle_changed_spte_acc_track(old_spte, new_spte, level);
646a6a0b05dSBen Gardon 	if (record_dirty_log)
647626808d1SSean Christopherson 		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
648626808d1SSean Christopherson 					      new_spte, level);
649626808d1SSean Christopherson }
650626808d1SSean Christopherson 
651626808d1SSean Christopherson static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
652626808d1SSean Christopherson 				     u64 new_spte, bool record_acc_track,
653626808d1SSean Christopherson 				     bool record_dirty_log)
654626808d1SSean Christopherson {
655626808d1SSean Christopherson 	WARN_ON_ONCE(iter->yielded);
656626808d1SSean Christopherson 
657626808d1SSean Christopherson 	__tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
658626808d1SSean Christopherson 			   new_spte, iter->gfn, iter->level,
659626808d1SSean Christopherson 			   record_acc_track, record_dirty_log);
660f8e14497SBen Gardon }
661f8e14497SBen Gardon 
662f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
663f8e14497SBen Gardon 				    u64 new_spte)
664f8e14497SBen Gardon {
665626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
666f8e14497SBen Gardon }
667f8e14497SBen Gardon 
668f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
669f8e14497SBen Gardon 						 struct tdp_iter *iter,
670f8e14497SBen Gardon 						 u64 new_spte)
671f8e14497SBen Gardon {
672626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
673a6a0b05dSBen Gardon }
674a6a0b05dSBen Gardon 
675a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
676a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
677a6a0b05dSBen Gardon 						 u64 new_spte)
678a6a0b05dSBen Gardon {
679626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
680faaf05b0SBen Gardon }
681faaf05b0SBen Gardon 
682faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
68377aa6075SDavid Matlack 	for_each_tdp_pte(_iter, _root, _start, _end)
684faaf05b0SBen Gardon 
685f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
686f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
687f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
688f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
689f8e14497SBen Gardon 			continue;					\
690f8e14497SBen Gardon 		else
691f8e14497SBen Gardon 
692bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
693b9e5603cSPaolo Bonzini 	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
694bb18842eSBen Gardon 
695faaf05b0SBen Gardon /*
696e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
697e28a436cSBen Gardon  * to the scheduler.
698e28a436cSBen Gardon  *
699e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
700e139a34eSBen Gardon  * TLB flush before yielding.
701e139a34eSBen Gardon  *
7023a0f64deSSean Christopherson  * If this function yields, iter->yielded is set and the caller must skip to
7033a0f64deSSean Christopherson  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
7043a0f64deSSean Christopherson  * over the paging structures to allow the iterator to continue its traversal
7053a0f64deSSean Christopherson  * from the paging structure root.
706e28a436cSBen Gardon  *
7073a0f64deSSean Christopherson  * Returns true if this function yielded.
708e28a436cSBen Gardon  */
7093a0f64deSSean Christopherson static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
7103a0f64deSSean Christopherson 							  struct tdp_iter *iter,
7113a0f64deSSean Christopherson 							  bool flush, bool shared)
712a6a0b05dSBen Gardon {
7133a0f64deSSean Christopherson 	WARN_ON(iter->yielded);
7143a0f64deSSean Christopherson 
715ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
716ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
717ed5e484bSBen Gardon 		return false;
718ed5e484bSBen Gardon 
719531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7207cca2d0bSBen Gardon 		rcu_read_unlock();
7217cca2d0bSBen Gardon 
722e139a34eSBen Gardon 		if (flush)
723e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
724e139a34eSBen Gardon 
7256103bc07SBen Gardon 		if (shared)
7266103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
7276103bc07SBen Gardon 		else
728531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
7296103bc07SBen Gardon 
7307cca2d0bSBen Gardon 		rcu_read_lock();
731ed5e484bSBen Gardon 
732ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
733ed5e484bSBen Gardon 
7343a0f64deSSean Christopherson 		iter->yielded = true;
735a6a0b05dSBen Gardon 	}
736e28a436cSBen Gardon 
7373a0f64deSSean Christopherson 	return iter->yielded;
738a6a0b05dSBen Gardon }
739a6a0b05dSBen Gardon 
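/*
 * A minimal sketch of how the helper above is used (see zap_gfn_range()
 * below for the real thing): callers test it at the top of each iteration
 * and, because a yield performs the remote TLB flush when @flush is set,
 * clear their local flush tracking before continuing:
 *
 *	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
 *		if (can_yield &&
 *		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
 *			flush = false;
 *			continue;
 *		}
 *	}
 */
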
740c10743a1SSean Christopherson bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
741c10743a1SSean Christopherson {
742c10743a1SSean Christopherson 	u64 old_spte;
743c10743a1SSean Christopherson 
744c10743a1SSean Christopherson 	/*
745c10743a1SSean Christopherson 	 * This helper intentionally doesn't allow zapping a root shadow page,
746c10743a1SSean Christopherson 	 * which doesn't have a parent page table and thus no associated entry.
747c10743a1SSean Christopherson 	 */
748c10743a1SSean Christopherson 	if (WARN_ON_ONCE(!sp->ptep))
749c10743a1SSean Christopherson 		return false;
750c10743a1SSean Christopherson 
751c10743a1SSean Christopherson 	rcu_read_lock();
752c10743a1SSean Christopherson 
753c10743a1SSean Christopherson 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
754c10743a1SSean Christopherson 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) {
755c10743a1SSean Christopherson 		rcu_read_unlock();
756c10743a1SSean Christopherson 		return false;
757c10743a1SSean Christopherson 	}
758c10743a1SSean Christopherson 
759c10743a1SSean Christopherson 	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
760c10743a1SSean Christopherson 			   sp->gfn, sp->role.level + 1, true, true);
761c10743a1SSean Christopherson 
762c10743a1SSean Christopherson 	rcu_read_unlock();
763c10743a1SSean Christopherson 
764c10743a1SSean Christopherson 	return true;
765c10743a1SSean Christopherson }
766c10743a1SSean Christopherson 
767faaf05b0SBen Gardon /*
768faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
769faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
770faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
771faaf05b0SBen Gardon  * MMU lock.
7726103bc07SBen Gardon  *
773063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
774063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
775063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
776063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
7776103bc07SBen Gardon  * operation can cause a soft lockup.
7786103bc07SBen Gardon  *
7796103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7806103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7816103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7826103bc07SBen Gardon  * MMU lock in write mode.
783faaf05b0SBen Gardon  */
784faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
7856103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
7866103bc07SBen Gardon 			  bool shared)
787faaf05b0SBen Gardon {
788524a1e4eSSean Christopherson 	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
789524a1e4eSSean Christopherson 	bool zap_all = (start == 0 && end >= max_gfn_host);
790faaf05b0SBen Gardon 	struct tdp_iter iter;
791faaf05b0SBen Gardon 
792524a1e4eSSean Christopherson 	/*
7930103098fSSean Christopherson 	 * No need to try to step down in the iterator when zapping all SPTEs,
7940103098fSSean Christopherson 	 * zapping the top-level non-leaf SPTEs will recurse on their children.
7950103098fSSean Christopherson 	 */
7960103098fSSean Christopherson 	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
7970103098fSSean Christopherson 
7980103098fSSean Christopherson 	/*
799524a1e4eSSean Christopherson 	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
800524a1e4eSSean Christopherson 	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
801524a1e4eSSean Christopherson 	 * and so KVM will never install a SPTE for such addresses.
802524a1e4eSSean Christopherson 	 */
803524a1e4eSSean Christopherson 	end = min(end, max_gfn_host);
804524a1e4eSSean Christopherson 
8056103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
8066103bc07SBen Gardon 
8077cca2d0bSBen Gardon 	rcu_read_lock();
8087cca2d0bSBen Gardon 
80977aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
8106103bc07SBen Gardon retry:
8111af4a960SBen Gardon 		if (can_yield &&
8126103bc07SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
813a835429cSSean Christopherson 			flush = false;
8141af4a960SBen Gardon 			continue;
8151af4a960SBen Gardon 		}
8161af4a960SBen Gardon 
817faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
818faaf05b0SBen Gardon 			continue;
819faaf05b0SBen Gardon 
820faaf05b0SBen Gardon 		/*
821faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
822faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
823524a1e4eSSean Christopherson 		 * lower level, except when zapping all SPTEs.
824faaf05b0SBen Gardon 		 */
825524a1e4eSSean Christopherson 		if (!zap_all &&
826524a1e4eSSean Christopherson 		    (iter.gfn < start ||
827faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
828faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
829faaf05b0SBen Gardon 			continue;
830faaf05b0SBen Gardon 
8316103bc07SBen Gardon 		if (!shared) {
832faaf05b0SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, 0);
833a835429cSSean Christopherson 			flush = true;
8343e72c791SDavid Matlack 		} else if (tdp_mmu_zap_spte_atomic(kvm, &iter)) {
8356103bc07SBen Gardon 			goto retry;
8366103bc07SBen Gardon 		}
837faaf05b0SBen Gardon 	}
8387cca2d0bSBen Gardon 
8397cca2d0bSBen Gardon 	rcu_read_unlock();
840a835429cSSean Christopherson 	return flush;
841faaf05b0SBen Gardon }
842faaf05b0SBen Gardon 
843faaf05b0SBen Gardon /*
844faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
845faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
846faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
847faaf05b0SBen Gardon  * MMU lock.
848faaf05b0SBen Gardon  */
8492b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
8505a324c24SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
851faaf05b0SBen Gardon {
852faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
853faaf05b0SBen Gardon 
854614f6970SPaolo Bonzini 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
8556103bc07SBen Gardon 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
8565a324c24SSean Christopherson 				      false);
857faaf05b0SBen Gardon 
858faaf05b0SBen Gardon 	return flush;
859faaf05b0SBen Gardon }
860faaf05b0SBen Gardon 
861faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
862faaf05b0SBen Gardon {
8632b9663d8SSean Christopherson 	int i;
864faaf05b0SBen Gardon 
865*77c8cd6bSSean Christopherson 	/*
866*77c8cd6bSSean Christopherson 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
867*77c8cd6bSSean Christopherson 	 * is being destroyed or the userspace VMM has exited.  In both cases,
868*77c8cd6bSSean Christopherson 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
869*77c8cd6bSSean Christopherson 	 */
8702b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
871*77c8cd6bSSean Christopherson 		(void)kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, false);
872faaf05b0SBen Gardon }
873bb18842eSBen Gardon 
8744c6654bdSBen Gardon static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
8754c6654bdSBen Gardon 						  struct kvm_mmu_page *prev_root)
8764c6654bdSBen Gardon {
8774c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8784c6654bdSBen Gardon 
8794c6654bdSBen Gardon 	if (prev_root)
8804c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8814c6654bdSBen Gardon 						  &prev_root->link,
8824c6654bdSBen Gardon 						  typeof(*prev_root), link);
8834c6654bdSBen Gardon 	else
8844c6654bdSBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8854c6654bdSBen Gardon 						   typeof(*next_root), link);
8864c6654bdSBen Gardon 
8874c6654bdSBen Gardon 	while (next_root && !(next_root->role.invalid &&
8884c6654bdSBen Gardon 			      refcount_read(&next_root->tdp_mmu_root_count)))
8894c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8904c6654bdSBen Gardon 						  &next_root->link,
8914c6654bdSBen Gardon 						  typeof(*next_root), link);
8924c6654bdSBen Gardon 
8934c6654bdSBen Gardon 	return next_root;
8944c6654bdSBen Gardon }
8954c6654bdSBen Gardon 
8964c6654bdSBen Gardon /*
897f28e9c7fSSean Christopherson  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
898f28e9c7fSSean Christopherson  * zap" completes.  Since kvm_tdp_mmu_invalidate_all_roots() has acquired a
899f28e9c7fSSean Christopherson  * reference to each invalidated root, roots will not be freed until after this
900f28e9c7fSSean Christopherson  * function drops the gifted reference, e.g. so that vCPUs don't get stuck with
901f28e9c7fSSean Christopherson  * tearing down paging structures.
9024c6654bdSBen Gardon  */
9034c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
9044c6654bdSBen Gardon {
9054c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
9064c6654bdSBen Gardon 	struct kvm_mmu_page *root;
9074c6654bdSBen Gardon 
9084c6654bdSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
9094c6654bdSBen Gardon 
9104c6654bdSBen Gardon 	rcu_read_lock();
9114c6654bdSBen Gardon 
9124c6654bdSBen Gardon 	root = next_invalidated_root(kvm, NULL);
9134c6654bdSBen Gardon 
9144c6654bdSBen Gardon 	while (root) {
9154c6654bdSBen Gardon 		next_root = next_invalidated_root(kvm, root);
9164c6654bdSBen Gardon 
9174c6654bdSBen Gardon 		rcu_read_unlock();
9184c6654bdSBen Gardon 
9197ae5840eSSean Christopherson 		/*
9207ae5840eSSean Christopherson 		 * A TLB flush is unnecessary, invalidated roots are guaranteed
9217ae5840eSSean Christopherson 		 * to be unreachable by the guest (see kvm_tdp_mmu_put_root()
9227ae5840eSSean Christopherson 		 * for more details), and unlike the legacy MMU, no vCPU kick
9237ae5840eSSean Christopherson 		 * is needed to play nice with lockless shadow walks as the TDP
9247ae5840eSSean Christopherson 		 * MMU protects its paging structures via RCU.  Note, zapping
9257ae5840eSSean Christopherson 		 * will still flush on yield, but that's a minor performance
9267ae5840eSSean Christopherson 		 * blip and not a functional issue.
9277ae5840eSSean Christopherson 		 */
9287ae5840eSSean Christopherson 		(void)zap_gfn_range(kvm, root, 0, -1ull, true, false, true);
9294c6654bdSBen Gardon 
9304c6654bdSBen Gardon 		/*
9314c6654bdSBen Gardon 		 * Put the reference acquired in
9324c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_all_roots().
9334c6654bdSBen Gardon 		 */
9344c6654bdSBen Gardon 		kvm_tdp_mmu_put_root(kvm, root, true);
9354c6654bdSBen Gardon 
9364c6654bdSBen Gardon 		root = next_root;
9374c6654bdSBen Gardon 
9384c6654bdSBen Gardon 		rcu_read_lock();
9394c6654bdSBen Gardon 	}
9404c6654bdSBen Gardon 
9414c6654bdSBen Gardon 	rcu_read_unlock();
9424c6654bdSBen Gardon }
9434c6654bdSBen Gardon 
944bb18842eSBen Gardon /*
945f28e9c7fSSean Christopherson  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
946f28e9c7fSSean Christopherson  * is about to be zapped, e.g. in response to a memslots update.  The caller is
947f28e9c7fSSean Christopherson  * responsible for invoking kvm_tdp_mmu_zap_invalidated_roots() to do the actual
948f28e9c7fSSean Christopherson  * zapping.
949b7cccd39SBen Gardon  *
950f28e9c7fSSean Christopherson  * Take a reference on all roots to prevent the root from being freed before it
951f28e9c7fSSean Christopherson  * is zapped by this thread.  Freeing a root is not a correctness issue, but if
952f28e9c7fSSean Christopherson  * a vCPU drops the last reference to a root prior to the root being zapped, it
953f28e9c7fSSean Christopherson  * will get stuck with tearing down the entire paging structure.
9544c6654bdSBen Gardon  *
955f28e9c7fSSean Christopherson  * Get a reference even if the root is already invalid,
956f28e9c7fSSean Christopherson  * kvm_tdp_mmu_zap_invalidated_roots() assumes it was gifted a reference to all
957f28e9c7fSSean Christopherson  * invalid roots, e.g. there's no epoch to identify roots that were invalidated
958f28e9c7fSSean Christopherson  * by a previous call.  Roots stay on the list until the last reference is
959f28e9c7fSSean Christopherson  * dropped, so even though all invalid roots are zapped, a root may not go away
960f28e9c7fSSean Christopherson  * for quite some time, e.g. if a vCPU blocks across multiple memslot updates.
961f28e9c7fSSean Christopherson  *
962f28e9c7fSSean Christopherson  * Because mmu_lock is held for write, it should be impossible to observe a
963f28e9c7fSSean Christopherson  * root with zero refcount, i.e. the list of roots cannot be stale.
9644c6654bdSBen Gardon  *
965b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
966b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
967b7cccd39SBen Gardon  */
968b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
969b7cccd39SBen Gardon {
970b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
971b7cccd39SBen Gardon 
972b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
973f28e9c7fSSean Christopherson 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
974f28e9c7fSSean Christopherson 		if (!WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
975b7cccd39SBen Gardon 			root->role.invalid = true;
976b7cccd39SBen Gardon 	}
977f28e9c7fSSean Christopherson }
978b7cccd39SBen Gardon 
979bb18842eSBen Gardon /*
980bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
981bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
982bb18842eSBen Gardon  */
983cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
984cdc47767SPaolo Bonzini 					  struct kvm_page_fault *fault,
985cdc47767SPaolo Bonzini 					  struct tdp_iter *iter)
986bb18842eSBen Gardon {
987c435d4b7SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
988bb18842eSBen Gardon 	u64 new_spte;
98957a3e96dSKai Huang 	int ret = RET_PF_FIXED;
990ad67e480SPaolo Bonzini 	bool wrprot = false;
991bb18842eSBen Gardon 
9927158bee4SPaolo Bonzini 	WARN_ON(sp->role.level != fault->goal_level);
993e710c5f6SDavid Matlack 	if (unlikely(!fault->slot))
994bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
9959a77daacSBen Gardon 	else
99653597858SDavid Matlack 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
9972839180cSPaolo Bonzini 					 fault->pfn, iter->old_spte, fault->prefetch, true,
9987158bee4SPaolo Bonzini 					 fault->map_writable, &new_spte);
999bb18842eSBen Gardon 
1000bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
1001bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
10023e72c791SDavid Matlack 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
10039a77daacSBen Gardon 		return RET_PF_RETRY;
1004bb18842eSBen Gardon 
1005bb18842eSBen Gardon 	/*
1006bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
1007bb18842eSBen Gardon 	 * protected, emulation is needed. If emulation were skipped, the
1008bb18842eSBen Gardon 	 * vCPU would just take the same fault again.
1009bb18842eSBen Gardon 	 */
1010ad67e480SPaolo Bonzini 	if (wrprot) {
1011cdc47767SPaolo Bonzini 		if (fault->write)
1012bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
1013bb18842eSBen Gardon 	}
1014bb18842eSBen Gardon 
1015bb18842eSBen Gardon 	/* If an MMIO SPTE is installed, the MMIO will need to be emulated. */
10169a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
10179a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
10189a77daacSBen Gardon 				     new_spte);
1019bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
10203849e092SSean Christopherson 	} else {
10219a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
10229a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
10233849e092SSean Christopherson 	}
1024bb18842eSBen Gardon 
1025857f8474SKai Huang 	/*
1026857f8474SKai Huang 	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
1027857f8474SKai Huang 	 * consistent with legacy MMU behavior.
1028857f8474SKai Huang 	 */
1029857f8474SKai Huang 	if (ret != RET_PF_SPURIOUS)
1030bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
1031bb18842eSBen Gardon 
1032bb18842eSBen Gardon 	return ret;
1033bb18842eSBen Gardon }
1034bb18842eSBen Gardon 
1035bb18842eSBen Gardon /*
1036cb00a70bSDavid Matlack  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1037cb00a70bSDavid Matlack  * provided page table.
10387b7e1ab6SDavid Matlack  *
10397b7e1ab6SDavid Matlack  * @kvm: kvm instance
10407b7e1ab6SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
10417b7e1ab6SDavid Matlack  * @sp: The new TDP page table to install.
10427b7e1ab6SDavid Matlack  * @account_nx: True if this page table is being installed to split a
10437b7e1ab6SDavid Matlack  *              non-executable huge page.
1044cb00a70bSDavid Matlack  * @shared: This operation is running under the MMU lock in read mode.
10457b7e1ab6SDavid Matlack  *
10467b7e1ab6SDavid Matlack  * Returns: 0 if the new page table was installed. Non-0 if the page table
10477b7e1ab6SDavid Matlack  *          could not be installed (e.g. the atomic compare-exchange failed).
10487b7e1ab6SDavid Matlack  */
1049cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1050cb00a70bSDavid Matlack 			   struct kvm_mmu_page *sp, bool account_nx,
1051cb00a70bSDavid Matlack 			   bool shared)
10527b7e1ab6SDavid Matlack {
10537b7e1ab6SDavid Matlack 	u64 spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask);
1054cb00a70bSDavid Matlack 	int ret = 0;
10557b7e1ab6SDavid Matlack 
1056cb00a70bSDavid Matlack 	if (shared) {
10577b7e1ab6SDavid Matlack 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
10587b7e1ab6SDavid Matlack 		if (ret)
10597b7e1ab6SDavid Matlack 			return ret;
1060cb00a70bSDavid Matlack 	} else {
1061cb00a70bSDavid Matlack 		tdp_mmu_set_spte(kvm, iter, spte);
1062cb00a70bSDavid Matlack 	}
10637b7e1ab6SDavid Matlack 
10647b7e1ab6SDavid Matlack 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
10657b7e1ab6SDavid Matlack 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
10667b7e1ab6SDavid Matlack 	if (account_nx)
10677b7e1ab6SDavid Matlack 		account_huge_nx_page(kvm, sp);
10687b7e1ab6SDavid Matlack 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
10697b7e1ab6SDavid Matlack 
10707b7e1ab6SDavid Matlack 	return 0;
10717b7e1ab6SDavid Matlack }
10727b7e1ab6SDavid Matlack 
10737b7e1ab6SDavid Matlack /*
1074bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1075bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
1076bb18842eSBen Gardon  */
10772f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1078bb18842eSBen Gardon {
1079bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1080bb18842eSBen Gardon 	struct tdp_iter iter;
108189c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1082bb18842eSBen Gardon 	int ret;
1083bb18842eSBen Gardon 
108473a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
1085bb18842eSBen Gardon 
1086f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
10877cca2d0bSBen Gardon 
10887cca2d0bSBen Gardon 	rcu_read_lock();
10897cca2d0bSBen Gardon 
10902f6305ddSPaolo Bonzini 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
109173a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
1092536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1093bb18842eSBen Gardon 
109473a3c659SPaolo Bonzini 		if (iter.level == fault->goal_level)
1095bb18842eSBen Gardon 			break;
1096bb18842eSBen Gardon 
1097bb18842eSBen Gardon 		/*
1098bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1099bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1100bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1101bb18842eSBen Gardon 		 */
1102bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1103bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
11043e72c791SDavid Matlack 			if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
11059a77daacSBen Gardon 				break;
1106bb18842eSBen Gardon 
1107bb18842eSBen Gardon 			/*
1108bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1109bb18842eSBen Gardon 			 * because the new value informs the !present
1110bb18842eSBen Gardon 			 * path below.
1111bb18842eSBen Gardon 			 */
11120e587aa7SSean Christopherson 			iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
1113bb18842eSBen Gardon 		}
1114bb18842eSBen Gardon 
1115bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
11167b7e1ab6SDavid Matlack 			bool account_nx = fault->huge_page_disallowed &&
11177b7e1ab6SDavid Matlack 					  fault->req_level >= iter.level;
11187b7e1ab6SDavid Matlack 
1119ff76d506SKai Huang 			/*
1120c4342633SIngo Molnar 			 * If the SPTE has been frozen by another thread, just
1121ff76d506SKai Huang 			 * give up and retry, avoiding unnecessary page table
1122ff76d506SKai Huang 			 * allocation and freeing.
1123ff76d506SKai Huang 			 */
1124ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1125ff76d506SKai Huang 				break;
1126ff76d506SKai Huang 
1127a82070b6SDavid Matlack 			sp = tdp_mmu_alloc_sp(vcpu);
1128a82070b6SDavid Matlack 			tdp_mmu_init_child_sp(sp, &iter);
1129a82070b6SDavid Matlack 
1130cb00a70bSDavid Matlack 			if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
11319a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
11329a77daacSBen Gardon 				break;
11339a77daacSBen Gardon 			}
1134bb18842eSBen Gardon 		}
1135bb18842eSBen Gardon 	}
1136bb18842eSBen Gardon 
113773a3c659SPaolo Bonzini 	if (iter.level != fault->goal_level) {
11387cca2d0bSBen Gardon 		rcu_read_unlock();
1139bb18842eSBen Gardon 		return RET_PF_RETRY;
11407cca2d0bSBen Gardon 	}
1141bb18842eSBen Gardon 
1142cdc47767SPaolo Bonzini 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
11437cca2d0bSBen Gardon 	rcu_read_unlock();
1144bb18842eSBen Gardon 
1145bb18842eSBen Gardon 	return ret;
1146bb18842eSBen Gardon }
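
/*
 * Illustrative sketch, not the real fault handler: a heavily simplified,
 * hypothetical caller of kvm_tdp_mmu_map().  The real path also resolves
 * fault->pfn, retries on mmu_notifier races, etc.; the point here is only
 * that the TDP MMU installs mappings with mmu_lock held for read.
 */
static int example_handle_tdp_fault(struct kvm_vcpu *vcpu,
				    struct kvm_page_fault *fault)
{
	int r;

	read_lock(&vcpu->kvm->mmu_lock);
	r = kvm_tdp_mmu_map(vcpu, fault);
	read_unlock(&vcpu->kvm->mmu_lock);

	return r;
}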
1147063afacdSBen Gardon 
11483039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
11493039bcc7SSean Christopherson 				 bool flush)
1150063afacdSBen Gardon {
115183b83a02SSean Christopherson 	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
115283b83a02SSean Christopherson 					   range->end, range->may_block, flush);
11533039bcc7SSean Christopherson }
11543039bcc7SSean Christopherson 
11553039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
11563039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
11573039bcc7SSean Christopherson 
11583039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
11593039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1160c1b91493SSean Christopherson 						   tdp_handler_t handler)
1161063afacdSBen Gardon {
1162063afacdSBen Gardon 	struct kvm_mmu_page *root;
11633039bcc7SSean Christopherson 	struct tdp_iter iter;
11643039bcc7SSean Christopherson 	bool ret = false;
1165063afacdSBen Gardon 
1166063afacdSBen Gardon 	/*
1167e1eed584SSean Christopherson 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1168e1eed584SSean Christopherson 	 * into this helper allow blocking, so yielding would be dead, wasteful code.
1169063afacdSBen Gardon 	 */
11703039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1171a151acecSSean Christopherson 		rcu_read_lock();
1172a151acecSSean Christopherson 
11733039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
11743039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
1175063afacdSBen Gardon 
11763039bcc7SSean Christopherson 		rcu_read_unlock();
1177a151acecSSean Christopherson 	}
1178063afacdSBen Gardon 
1179063afacdSBen Gardon 	return ret;
1180063afacdSBen Gardon }
1181063afacdSBen Gardon 
1182f8e14497SBen Gardon /*
1183f8e14497SBen Gardon  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
1184f8e14497SBen Gardon  * if any of the GFNs in the range have been accessed.
1185f8e14497SBen Gardon  */
11863039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
11873039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1188f8e14497SBen Gardon {
1189f8e14497SBen Gardon 	u64 new_spte = 0;
1190f8e14497SBen Gardon 
11913039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
11923039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
11933039bcc7SSean Christopherson 		return false;
11947cca2d0bSBen Gardon 
11953039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1196f8e14497SBen Gardon 
1197f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
11988f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1199f8e14497SBen Gardon 	} else {
1200f8e14497SBen Gardon 		/*
1201f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1202f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1203f8e14497SBen Gardon 		 */
1204f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1205f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1206f8e14497SBen Gardon 
1207f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1208f8e14497SBen Gardon 	}
1209f8e14497SBen Gardon 
12103039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
121133dd3574SBen Gardon 
12123039bcc7SSean Christopherson 	return true;
1213f8e14497SBen Gardon }
1214f8e14497SBen Gardon 
12153039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1216f8e14497SBen Gardon {
12173039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1218f8e14497SBen Gardon }
1219f8e14497SBen Gardon 
12203039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
12213039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1222f8e14497SBen Gardon {
12233039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1224f8e14497SBen Gardon }
1225f8e14497SBen Gardon 
12263039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1227f8e14497SBen Gardon {
12283039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
12293039bcc7SSean Christopherson }
12303039bcc7SSean Christopherson 
12313039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
12323039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
12333039bcc7SSean Christopherson {
12343039bcc7SSean Christopherson 	u64 new_spte;
12353039bcc7SSean Christopherson 
12363039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
12373039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
12383039bcc7SSean Christopherson 
12393039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
12403039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
12413039bcc7SSean Christopherson 		return false;
12423039bcc7SSean Christopherson 
12433039bcc7SSean Christopherson 	/*
12443039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
12453039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
12463039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
12473039bcc7SSean Christopherson 	 * See __handle_changed_spte().
12483039bcc7SSean Christopherson 	 */
12493039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
12503039bcc7SSean Christopherson 
12513039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
12523039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
12533039bcc7SSean Christopherson 								  pte_pfn(range->pte));
12543039bcc7SSean Christopherson 
12553039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
12563039bcc7SSean Christopherson 	}
12573039bcc7SSean Christopherson 
12583039bcc7SSean Christopherson 	return true;
1259f8e14497SBen Gardon }
12601d8dd6b3SBen Gardon 
12611d8dd6b3SBen Gardon /*
12621d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
12631d8dd6b3SBen Gardon  * range->pte holds the new PTE mapping the HVA specified by the MMU
12641d8dd6b3SBen Gardon  * notifier.
12651d8dd6b3SBen Gardon  * Returns true if a TLB flush is needed before releasing the MMU lock.
12661d8dd6b3SBen Gardon  */
12673039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
12681d8dd6b3SBen Gardon {
126993fa50f6SSean Christopherson 	/*
127093fa50f6SSean Christopherson 	 * No need to handle the remote TLB flush under RCU protection, the
127193fa50f6SSean Christopherson 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
127293fa50f6SSean Christopherson 	 * shadow page.  See the WARN on pfn_changed in __handle_changed_spte().
127393fa50f6SSean Christopherson 	 */
127493fa50f6SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
12751d8dd6b3SBen Gardon }
12761d8dd6b3SBen Gardon 
1277a6a0b05dSBen Gardon /*
1278bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1279bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1280bedd9195SDavid Matlack  * be flushed.
1281a6a0b05dSBen Gardon  */
1282a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1283a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1284a6a0b05dSBen Gardon {
1285a6a0b05dSBen Gardon 	struct tdp_iter iter;
1286a6a0b05dSBen Gardon 	u64 new_spte;
1287a6a0b05dSBen Gardon 	bool spte_set = false;
1288a6a0b05dSBen Gardon 
12897cca2d0bSBen Gardon 	rcu_read_lock();
12907cca2d0bSBen Gardon 
1291a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1292a6a0b05dSBen Gardon 
129377aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
129424ae4cfaSBen Gardon retry:
129524ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12961af4a960SBen Gardon 			continue;
12971af4a960SBen Gardon 
1298a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
12990f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
13000f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1301a6a0b05dSBen Gardon 			continue;
1302a6a0b05dSBen Gardon 
1303a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1304a6a0b05dSBen Gardon 
13053e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
130624ae4cfaSBen Gardon 			goto retry;
13073255530aSDavid Matlack 
1308a6a0b05dSBen Gardon 		spte_set = true;
1309a6a0b05dSBen Gardon 	}
13107cca2d0bSBen Gardon 
13117cca2d0bSBen Gardon 	rcu_read_unlock();
1312a6a0b05dSBen Gardon 	return spte_set;
1313a6a0b05dSBen Gardon }
1314a6a0b05dSBen Gardon 
1315a6a0b05dSBen Gardon /*
1316a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1317a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1318a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1319a6a0b05dSBen Gardon  */
1320269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1321269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1322a6a0b05dSBen Gardon {
1323a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1324a6a0b05dSBen Gardon 	bool spte_set = false;
1325a6a0b05dSBen Gardon 
132624ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1327a6a0b05dSBen Gardon 
1328d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1329a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1330a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1331a6a0b05dSBen Gardon 
1332a6a0b05dSBen Gardon 	return spte_set;
1333a6a0b05dSBen Gardon }
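
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * write-protects an entire memslot down to 4K mappings, e.g. when dirty
 * logging is enabled, and flushes TLBs if any SPTE was changed.
 */
static void example_wrprot_slot_for_dirty_log(struct kvm *kvm,
					      const struct kvm_memory_slot *slot)
{
	bool flush;

	read_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
	read_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}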
1334a6a0b05dSBen Gardon 
1335a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1336a3fe5dbdSDavid Matlack {
1337a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1338a3fe5dbdSDavid Matlack 
1339a3fe5dbdSDavid Matlack 	gfp |= __GFP_ZERO;
1340a3fe5dbdSDavid Matlack 
1341a3fe5dbdSDavid Matlack 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1342a3fe5dbdSDavid Matlack 	if (!sp)
1343a3fe5dbdSDavid Matlack 		return NULL;
1344a3fe5dbdSDavid Matlack 
1345a3fe5dbdSDavid Matlack 	sp->spt = (void *)__get_free_page(gfp);
1346a3fe5dbdSDavid Matlack 	if (!sp->spt) {
1347a3fe5dbdSDavid Matlack 		kmem_cache_free(mmu_page_header_cache, sp);
1348a3fe5dbdSDavid Matlack 		return NULL;
1349a3fe5dbdSDavid Matlack 	}
1350a3fe5dbdSDavid Matlack 
1351a3fe5dbdSDavid Matlack 	return sp;
1352a3fe5dbdSDavid Matlack }
1353a3fe5dbdSDavid Matlack 
1354a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1355cb00a70bSDavid Matlack 						       struct tdp_iter *iter,
1356cb00a70bSDavid Matlack 						       bool shared)
1357a3fe5dbdSDavid Matlack {
1358a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1359a3fe5dbdSDavid Matlack 
1360a3fe5dbdSDavid Matlack 	/*
1361a3fe5dbdSDavid Matlack 	 * Since we are allocating while under the MMU lock we have to be
1362a3fe5dbdSDavid Matlack 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1363a3fe5dbdSDavid Matlack 	 * reclaim and to avoid making any filesystem callbacks (which can end
1364a3fe5dbdSDavid Matlack 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1365a3fe5dbdSDavid Matlack 	 *
1366a3fe5dbdSDavid Matlack 	 * If this allocation fails we drop the lock and retry with reclaim
1367a3fe5dbdSDavid Matlack 	 * allowed.
1368a3fe5dbdSDavid Matlack 	 */
1369a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1370a3fe5dbdSDavid Matlack 	if (sp)
1371a3fe5dbdSDavid Matlack 		return sp;
1372a3fe5dbdSDavid Matlack 
1373a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1374cb00a70bSDavid Matlack 
1375cb00a70bSDavid Matlack 	if (shared)
1376a3fe5dbdSDavid Matlack 		read_unlock(&kvm->mmu_lock);
1377cb00a70bSDavid Matlack 	else
1378cb00a70bSDavid Matlack 		write_unlock(&kvm->mmu_lock);
1379a3fe5dbdSDavid Matlack 
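	/*
	 * Dropping mmu_lock invalidates the in-progress walk; flag the
	 * iterator as having yielded so the walk restarts at the current GFN.
	 */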
1380a3fe5dbdSDavid Matlack 	iter->yielded = true;
1381a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1382a3fe5dbdSDavid Matlack 
1383cb00a70bSDavid Matlack 	if (shared)
1384a3fe5dbdSDavid Matlack 		read_lock(&kvm->mmu_lock);
1385cb00a70bSDavid Matlack 	else
1386cb00a70bSDavid Matlack 		write_lock(&kvm->mmu_lock);
1387cb00a70bSDavid Matlack 
1388a3fe5dbdSDavid Matlack 	rcu_read_lock();
1389a3fe5dbdSDavid Matlack 
1390a3fe5dbdSDavid Matlack 	return sp;
1391a3fe5dbdSDavid Matlack }
1392a3fe5dbdSDavid Matlack 
1393cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1394cb00a70bSDavid Matlack 				   struct kvm_mmu_page *sp, bool shared)
1395a3fe5dbdSDavid Matlack {
1396a3fe5dbdSDavid Matlack 	const u64 huge_spte = iter->old_spte;
1397a3fe5dbdSDavid Matlack 	const int level = iter->level;
1398a3fe5dbdSDavid Matlack 	int ret, i;
1399a3fe5dbdSDavid Matlack 
1400a3fe5dbdSDavid Matlack 	tdp_mmu_init_child_sp(sp, iter);
1401a3fe5dbdSDavid Matlack 
1402a3fe5dbdSDavid Matlack 	/*
1403a3fe5dbdSDavid Matlack 	 * No need for atomics when writing to sp->spt since the page table has
1404a3fe5dbdSDavid Matlack 	 * not been linked in yet and thus is not reachable from any other CPU.
1405a3fe5dbdSDavid Matlack 	 */
1406a3fe5dbdSDavid Matlack 	for (i = 0; i < PT64_ENT_PER_PAGE; i++)
1407a3fe5dbdSDavid Matlack 		sp->spt[i] = make_huge_page_split_spte(huge_spte, level, i);
1408a3fe5dbdSDavid Matlack 
1409a3fe5dbdSDavid Matlack 	/*
1410a3fe5dbdSDavid Matlack 	 * Replace the huge spte with a pointer to the populated lower level
1411a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush vCPUs
1412a3fe5dbdSDavid Matlack 	 * will see a mix of the split mappings and the original huge mapping,
1413a3fe5dbdSDavid Matlack 	 * depending on what's currently in their TLB. This is fine from a
1414a3fe5dbdSDavid Matlack 	 * correctness standpoint since the translation will be the same either
1415a3fe5dbdSDavid Matlack 	 * way.
1416a3fe5dbdSDavid Matlack 	 */
1417cb00a70bSDavid Matlack 	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
1418a3fe5dbdSDavid Matlack 	if (ret)
1419e0b728b1SDavid Matlack 		goto out;
1420a3fe5dbdSDavid Matlack 
1421a3fe5dbdSDavid Matlack 	/*
1422a3fe5dbdSDavid Matlack 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1423a3fe5dbdSDavid Matlack 	 * are overwriting from the page stats. But we have to manually update
1424a3fe5dbdSDavid Matlack 	 * the page stats with the new present child pages.
1425a3fe5dbdSDavid Matlack 	 */
1426a3fe5dbdSDavid Matlack 	kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
1427a3fe5dbdSDavid Matlack 
1428e0b728b1SDavid Matlack out:
1429e0b728b1SDavid Matlack 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1430e0b728b1SDavid Matlack 	return ret;
1431a3fe5dbdSDavid Matlack }
1432a3fe5dbdSDavid Matlack 
1433a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1434a3fe5dbdSDavid Matlack 					 struct kvm_mmu_page *root,
1435a3fe5dbdSDavid Matlack 					 gfn_t start, gfn_t end,
1436cb00a70bSDavid Matlack 					 int target_level, bool shared)
1437a3fe5dbdSDavid Matlack {
1438a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp = NULL;
1439a3fe5dbdSDavid Matlack 	struct tdp_iter iter;
1440a3fe5dbdSDavid Matlack 	int ret = 0;
1441a3fe5dbdSDavid Matlack 
1442a3fe5dbdSDavid Matlack 	rcu_read_lock();
1443a3fe5dbdSDavid Matlack 
1444a3fe5dbdSDavid Matlack 	/*
1445a3fe5dbdSDavid Matlack 	 * Traverse the page table splitting all huge pages above the target
1446a3fe5dbdSDavid Matlack 	 * level into one lower level. For example, if we encounter a 1GB page
1447a3fe5dbdSDavid Matlack 	 * we split it into 512 2MB pages.
1448a3fe5dbdSDavid Matlack 	 *
1449a3fe5dbdSDavid Matlack 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1450a3fe5dbdSDavid Matlack 	 * to visit an SPTE before ever visiting its children, which means we
1451a3fe5dbdSDavid Matlack 	 * will correctly recursively split huge pages that are more than one
1452a3fe5dbdSDavid Matlack 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1453a3fe5dbdSDavid Matlack 	 * and then splitting each of those to 512 4KB pages).
1454a3fe5dbdSDavid Matlack 	 */
1455a3fe5dbdSDavid Matlack 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1456a3fe5dbdSDavid Matlack retry:
1457cb00a70bSDavid Matlack 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1458a3fe5dbdSDavid Matlack 			continue;
1459a3fe5dbdSDavid Matlack 
1460a3fe5dbdSDavid Matlack 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1461a3fe5dbdSDavid Matlack 			continue;
1462a3fe5dbdSDavid Matlack 
1463a3fe5dbdSDavid Matlack 		if (!sp) {
1464cb00a70bSDavid Matlack 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1465a3fe5dbdSDavid Matlack 			if (!sp) {
1466a3fe5dbdSDavid Matlack 				ret = -ENOMEM;
1467e0b728b1SDavid Matlack 				trace_kvm_mmu_split_huge_page(iter.gfn,
1468e0b728b1SDavid Matlack 							      iter.old_spte,
1469e0b728b1SDavid Matlack 							      iter.level, ret);
1470a3fe5dbdSDavid Matlack 				break;
1471a3fe5dbdSDavid Matlack 			}
1472a3fe5dbdSDavid Matlack 
1473a3fe5dbdSDavid Matlack 			if (iter.yielded)
1474a3fe5dbdSDavid Matlack 				continue;
1475a3fe5dbdSDavid Matlack 		}
1476a3fe5dbdSDavid Matlack 
1477cb00a70bSDavid Matlack 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1478a3fe5dbdSDavid Matlack 			goto retry;
1479a3fe5dbdSDavid Matlack 
1480a3fe5dbdSDavid Matlack 		sp = NULL;
1481a3fe5dbdSDavid Matlack 	}
1482a3fe5dbdSDavid Matlack 
1483a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1484a3fe5dbdSDavid Matlack 
1485a3fe5dbdSDavid Matlack 	/*
1486a3fe5dbdSDavid Matlack 	 * It's possible to exit the loop having never used the last sp if, for
1487a3fe5dbdSDavid Matlack 	 * example, a vCPU doing HugePage NX splitting wins the race and
1488a3fe5dbdSDavid Matlack 	 * installs its own sp in place of the last sp we tried to split.
1489a3fe5dbdSDavid Matlack 	 */
1490a3fe5dbdSDavid Matlack 	if (sp)
1491a3fe5dbdSDavid Matlack 		tdp_mmu_free_sp(sp);
1492a3fe5dbdSDavid Matlack 
1493a3fe5dbdSDavid Matlack 	return ret;
1494a3fe5dbdSDavid Matlack }
1495a3fe5dbdSDavid Matlack 
1497a3fe5dbdSDavid Matlack /*
1498a3fe5dbdSDavid Matlack  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1499a3fe5dbdSDavid Matlack  */
1500a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1501a3fe5dbdSDavid Matlack 				      const struct kvm_memory_slot *slot,
1502a3fe5dbdSDavid Matlack 				      gfn_t start, gfn_t end,
1503cb00a70bSDavid Matlack 				      int target_level, bool shared)
1504a3fe5dbdSDavid Matlack {
1505a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *root;
1506a3fe5dbdSDavid Matlack 	int r = 0;
1507a3fe5dbdSDavid Matlack 
1508cb00a70bSDavid Matlack 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1509a3fe5dbdSDavid Matlack 
15107c554d8eSPaolo Bonzini 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1511cb00a70bSDavid Matlack 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1512a3fe5dbdSDavid Matlack 		if (r) {
1513cb00a70bSDavid Matlack 			kvm_tdp_mmu_put_root(kvm, root, shared);
1514a3fe5dbdSDavid Matlack 			break;
1515a3fe5dbdSDavid Matlack 		}
1516a3fe5dbdSDavid Matlack 	}
1517a3fe5dbdSDavid Matlack }
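
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * eagerly splits every huge page in a memslot down to 4K mappings while
 * holding mmu_lock for read, e.g. so that dirty logging can track writes at
 * 4K granularity without waiting for write faults.
 */
static void example_split_slot_to_4k(struct kvm *kvm,
				     const struct kvm_memory_slot *slot)
{
	read_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_try_split_huge_pages(kvm, slot, slot->base_gfn,
					 slot->base_gfn + slot->npages,
					 PG_LEVEL_4K, true);
	read_unlock(&kvm->mmu_lock);
}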
1518a3fe5dbdSDavid Matlack 
1519a6a0b05dSBen Gardon /*
1520a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1521a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1522a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1523a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1524a6a0b05dSBen Gardon  * be flushed.
1525a6a0b05dSBen Gardon  */
1526a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1527a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1528a6a0b05dSBen Gardon {
1529a6a0b05dSBen Gardon 	struct tdp_iter iter;
1530a6a0b05dSBen Gardon 	u64 new_spte;
1531a6a0b05dSBen Gardon 	bool spte_set = false;
1532a6a0b05dSBen Gardon 
15337cca2d0bSBen Gardon 	rcu_read_lock();
15347cca2d0bSBen Gardon 
1535a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
153624ae4cfaSBen Gardon retry:
153724ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
15381af4a960SBen Gardon 			continue;
15391af4a960SBen Gardon 
15403354ef5aSSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte))
15413354ef5aSSean Christopherson 			continue;
15423354ef5aSSean Christopherson 
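		/*
		 * SPTEs that cannot use A/D bits track dirty state via the
		 * writable bit, so clear W instead of D for them.
		 */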
1543a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1544a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1545a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1546a6a0b05dSBen Gardon 			else
1547a6a0b05dSBen Gardon 				continue;
1548a6a0b05dSBen Gardon 		} else {
1549a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1550a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1551a6a0b05dSBen Gardon 			else
1552a6a0b05dSBen Gardon 				continue;
1553a6a0b05dSBen Gardon 		}
1554a6a0b05dSBen Gardon 
15553e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
155624ae4cfaSBen Gardon 			goto retry;
15573255530aSDavid Matlack 
1558a6a0b05dSBen Gardon 		spte_set = true;
1559a6a0b05dSBen Gardon 	}
15607cca2d0bSBen Gardon 
15617cca2d0bSBen Gardon 	rcu_read_unlock();
1562a6a0b05dSBen Gardon 	return spte_set;
1563a6a0b05dSBen Gardon }
1564a6a0b05dSBen Gardon 
1565a6a0b05dSBen Gardon /*
1566a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1567a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1568a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1569a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1570a6a0b05dSBen Gardon  * be flushed.
1571a6a0b05dSBen Gardon  */
1572269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1573269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1574a6a0b05dSBen Gardon {
1575a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1576a6a0b05dSBen Gardon 	bool spte_set = false;
1577a6a0b05dSBen Gardon 
157824ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1579a6a0b05dSBen Gardon 
1580d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1581a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1582a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1583a6a0b05dSBen Gardon 
1584a6a0b05dSBen Gardon 	return spte_set;
1585a6a0b05dSBen Gardon }
1586a6a0b05dSBen Gardon 
1587a6a0b05dSBen Gardon /*
1588a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1589a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1590a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1591a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1592a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1593a6a0b05dSBen Gardon  */
1594a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1595a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1596a6a0b05dSBen Gardon {
1597a6a0b05dSBen Gardon 	struct tdp_iter iter;
1598a6a0b05dSBen Gardon 	u64 new_spte;
1599a6a0b05dSBen Gardon 
16007cca2d0bSBen Gardon 	rcu_read_lock();
16017cca2d0bSBen Gardon 
1602a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1603a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1604a6a0b05dSBen Gardon 		if (!mask)
1605a6a0b05dSBen Gardon 			break;
1606a6a0b05dSBen Gardon 
1607a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1608a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1609a6a0b05dSBen Gardon 			continue;
1610a6a0b05dSBen Gardon 
1611f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1612f1b3b06aSBen Gardon 
1613a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1614a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1615a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1616a6a0b05dSBen Gardon 			else
1617a6a0b05dSBen Gardon 				continue;
1618a6a0b05dSBen Gardon 		} else {
1619a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1620a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1621a6a0b05dSBen Gardon 			else
1622a6a0b05dSBen Gardon 				continue;
1623a6a0b05dSBen Gardon 		}
1624a6a0b05dSBen Gardon 
1625a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1626a6a0b05dSBen Gardon 	}
16277cca2d0bSBen Gardon 
16287cca2d0bSBen Gardon 	rcu_read_unlock();
1629a6a0b05dSBen Gardon }
1630a6a0b05dSBen Gardon 
1631a6a0b05dSBen Gardon /*
1632a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1633a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1634a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1635a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1636a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1637a6a0b05dSBen Gardon  */
1638a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1639a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1640a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1641a6a0b05dSBen Gardon 				       bool wrprot)
1642a6a0b05dSBen Gardon {
1643a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1644a6a0b05dSBen Gardon 
1645531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1646a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1647a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1648a6a0b05dSBen Gardon }
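
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * clears dirty state for a 64-GFN-aligned chunk of a memslot's dirty bitmap,
 * holding mmu_lock for write as asserted above.
 */
static void example_clear_dirty_chunk(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      gfn_t base_gfn, unsigned long mask)
{
	write_lock(&kvm->mmu_lock);
	/* wrprot == false: clear D bits (or W bits if A/D bits are unusable). */
	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, base_gfn, mask, false);
	write_unlock(&kvm->mmu_lock);
}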
1649a6a0b05dSBen Gardon 
1650a6a0b05dSBen Gardon /*
165187aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
165287aa9ec9SBen Gardon  * GFNs within the slot.
165314881998SBen Gardon  */
16544b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
165514881998SBen Gardon 				       struct kvm_mmu_page *root,
16564b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
165714881998SBen Gardon {
16589eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
16599eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
166014881998SBen Gardon 	struct tdp_iter iter;
166114881998SBen Gardon 	kvm_pfn_t pfn;
166214881998SBen Gardon 
16637cca2d0bSBen Gardon 	rcu_read_lock();
16647cca2d0bSBen Gardon 
166514881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
16662db6f772SBen Gardon retry:
16674b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
16681af4a960SBen Gardon 			continue;
16691af4a960SBen Gardon 
167014881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
167187aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
167214881998SBen Gardon 			continue;
167314881998SBen Gardon 
167414881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
167514881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
16769eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
16779eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
167814881998SBen Gardon 			continue;
167914881998SBen Gardon 
16804b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
16813e72c791SDavid Matlack 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
16822db6f772SBen Gardon 			goto retry;
16832db6f772SBen Gardon 	}
168414881998SBen Gardon 
16857cca2d0bSBen Gardon 	rcu_read_unlock();
168614881998SBen Gardon }
168714881998SBen Gardon 
168814881998SBen Gardon /*
168914881998SBen Gardon  * Zap leaf entries which could be replaced by large mappings, for GFNs
169014881998SBen Gardon  * within the slot, so that huge pages can be recreated on the next fault.
169114881998SBen Gardon  */
16924b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
16934b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
169414881998SBen Gardon {
169514881998SBen Gardon 	struct kvm_mmu_page *root;
169614881998SBen Gardon 
16972db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
169814881998SBen Gardon 
1699d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
17004b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
170114881998SBen Gardon }
170246044f72SBen Gardon 
170346044f72SBen Gardon /*
170446044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
17055fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
170646044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
170746044f72SBen Gardon  */
170846044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
17093ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
171046044f72SBen Gardon {
171146044f72SBen Gardon 	struct tdp_iter iter;
171246044f72SBen Gardon 	u64 new_spte;
171346044f72SBen Gardon 	bool spte_set = false;
171446044f72SBen Gardon 
17153ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
17163ad93562SKeqian Zhu 
17177cca2d0bSBen Gardon 	rcu_read_lock();
17187cca2d0bSBen Gardon 
171977aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
17203ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
17213ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
17223ad93562SKeqian Zhu 			continue;
17233ad93562SKeqian Zhu 
172446044f72SBen Gardon 		new_spte = iter.old_spte &
17255fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
172646044f72SBen Gardon 
17277c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
17287c8a4742SDavid Matlack 			break;
17297c8a4742SDavid Matlack 
173046044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
173146044f72SBen Gardon 		spte_set = true;
173246044f72SBen Gardon 	}
173346044f72SBen Gardon 
17347cca2d0bSBen Gardon 	rcu_read_unlock();
17357cca2d0bSBen Gardon 
173646044f72SBen Gardon 	return spte_set;
173746044f72SBen Gardon }
173846044f72SBen Gardon 
173946044f72SBen Gardon /*
174046044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
17415fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
174246044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
174346044f72SBen Gardon  */
174446044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
17453ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
17463ad93562SKeqian Zhu 				   int min_level)
174746044f72SBen Gardon {
174846044f72SBen Gardon 	struct kvm_mmu_page *root;
174946044f72SBen Gardon 	bool spte_set = false;
175046044f72SBen Gardon 
1751531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1752a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
17533ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1754a3f15bdaSSean Christopherson 
175546044f72SBen Gardon 	return spte_set;
175646044f72SBen Gardon }
175746044f72SBen Gardon 
175895fb5b02SBen Gardon /*
175995fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
176095fb5b02SBen Gardon  * That SPTE may be non-present.
1761c5c8c7c5SDavid Matlack  *
1762c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
176395fb5b02SBen Gardon  */
176439b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
176539b4d43eSSean Christopherson 			 int *root_level)
176695fb5b02SBen Gardon {
176795fb5b02SBen Gardon 	struct tdp_iter iter;
176895fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
176995fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
17702aa07893SSean Christopherson 	int leaf = -1;
177195fb5b02SBen Gardon 
177239b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
177395fb5b02SBen Gardon 
177495fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
177595fb5b02SBen Gardon 		leaf = iter.level;
1776dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
177795fb5b02SBen Gardon 	}
177895fb5b02SBen Gardon 
177995fb5b02SBen Gardon 	return leaf;
178095fb5b02SBen Gardon }
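
/*
 * Illustrative sketch, not part of the original file: a hypothetical debug
 * helper that dumps the SPTEs covering a guest physical address using the
 * lockless walk above.
 */
static void example_dump_tdp_walk(struct kvm_vcpu *vcpu, u64 gpa)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	kvm_tdp_mmu_walk_lockless_begin();
	leaf = kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();

	/* leaf < 0 means no SPTEs were walked, e.g. the root is not valid. */
	if (leaf < 0)
		return;

	for (level = root_level; level >= leaf; level--)
		pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
}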
17816e8eb206SDavid Matlack 
17826e8eb206SDavid Matlack /*
17836e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
17846e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
17856e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
17866e8eb206SDavid Matlack  *
17876e8eb206SDavid Matlack  * Contract:
17886e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
17896e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
17906e8eb206SDavid Matlack  *
17916e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
17926e8eb206SDavid Matlack  */
17936e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
17946e8eb206SDavid Matlack 					u64 *spte)
17956e8eb206SDavid Matlack {
17966e8eb206SDavid Matlack 	struct tdp_iter iter;
17976e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
17986e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
17996e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
18006e8eb206SDavid Matlack 
18016e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
18026e8eb206SDavid Matlack 		*spte = iter.old_spte;
18036e8eb206SDavid Matlack 		sptep = iter.sptep;
18046e8eb206SDavid Matlack 	}
18056e8eb206SDavid Matlack 
18066e8eb206SDavid Matlack 	/*
18076e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
18086e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
18096e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
18106e8eb206SDavid Matlack 	 * annotation.
18116e8eb206SDavid Matlack 	 *
18126e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
18136e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
18146e8eb206SDavid Matlack 	 * outside of mmu_lock.
18156e8eb206SDavid Matlack 	 */
18166e8eb206SDavid Matlack 	return rcu_dereference(sptep);
18176e8eb206SDavid Matlack }
1818