xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 614f6970)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
1371ba3f31SPaolo Bonzini static bool __read_mostly tdp_mmu_enabled = true;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17d501f747SBen Gardon bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20d501f747SBen Gardon 		return false;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28d501f747SBen Gardon 
29d501f747SBen Gardon 	return true;
30fe5db27dSBen Gardon }
31fe5db27dSBen Gardon 
32226b8c8fSSean Christopherson /* Arbitrarily returns true so that this may be used in if statements. */
33226b8c8fSSean Christopherson static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
346103bc07SBen Gardon 							     bool shared)
356103bc07SBen Gardon {
366103bc07SBen Gardon 	if (shared)
376103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
386103bc07SBen Gardon 	else
396103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
40226b8c8fSSean Christopherson 
41226b8c8fSSean Christopherson 	return true;
426103bc07SBen Gardon }
436103bc07SBen Gardon 
44fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
45fe5db27dSBen Gardon {
46fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
47fe5db27dSBen Gardon 		return;
4802c00b3aSBen Gardon 
49524a1e4eSSean Christopherson 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
5002c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
517cca2d0bSBen Gardon 
527cca2d0bSBen Gardon 	/*
537cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
547cca2d0bSBen Gardon 	 * can run before the VM is torn down.
557cca2d0bSBen Gardon 	 */
567cca2d0bSBen Gardon 	rcu_barrier();
5702c00b3aSBen Gardon }
5802c00b3aSBen Gardon 
592bdb3d84SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
606103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
616103bc07SBen Gardon 			  bool shared);
622bdb3d84SBen Gardon 
632bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
64a889ea54SBen Gardon {
652bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
662bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
67a889ea54SBen Gardon }
68a889ea54SBen Gardon 
69c0e64238SBen Gardon /*
70c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
71c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
72c0e64238SBen Gardon  * the memory.
73c0e64238SBen Gardon  * Because TDP MMU page table memory is only accessed inside RCU read-side
74c0e64238SBen Gardon  * critical sections and is freed only after a grace period has elapsed,
75c0e64238SBen Gardon  * lockless walkers can never use the memory after it has been freed.
76c0e64238SBen Gardon  */
77c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
78a889ea54SBen Gardon {
79c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
80c0e64238SBen Gardon 					       rcu_head);
81a889ea54SBen Gardon 
82c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
83a889ea54SBen Gardon }
84a889ea54SBen Gardon 
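/*
 * Drop a reference to @root.  When the last reference is put, the root is
 * unlinked from tdp_mmu_roots, its entire subtree is zapped, and the root's
 * page table memory is freed via an RCU callback once lockless walkers are
 * guaranteed to be done with it.
 */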
856103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
866103bc07SBen Gardon 			  bool shared)
872bdb3d84SBen Gardon {
886103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
892bdb3d84SBen Gardon 
9011cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
912bdb3d84SBen Gardon 		return;
922bdb3d84SBen Gardon 
932bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
942bdb3d84SBen Gardon 
95c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
96c0e64238SBen Gardon 	list_del_rcu(&root->link);
97c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
982bdb3d84SBen Gardon 
99db01416bSSean Christopherson 	/*
100db01416bSSean Christopherson 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
101db01416bSSean Christopherson 	 * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
102db01416bSSean Christopherson 	 * to a different pCPU.  Note, the local TLB flush on reuse also
103db01416bSSean Christopherson 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
104db01416bSSean Christopherson 	 * intermediate paging structures, that may be zapped, as such entries
105db01416bSSean Christopherson 	 * are associated with the ASID on both VMX and SVM.
106db01416bSSean Christopherson 	 */
107db01416bSSean Christopherson 	(void)zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
1082bdb3d84SBen Gardon 
109c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
110a889ea54SBen Gardon }
111a889ea54SBen Gardon 
112cfc10997SBen Gardon /*
113d62007edSSean Christopherson  * Returns the next root after @prev_root (or the first root if @prev_root is
114d62007edSSean Christopherson  * NULL).  A reference to the returned root is acquired, and the reference to
115d62007edSSean Christopherson  * @prev_root is released (the caller obviously must hold a reference to
116d62007edSSean Christopherson  * @prev_root if it's non-NULL).
117d62007edSSean Christopherson  *
118d62007edSSean Christopherson  * If @only_valid is true, invalid roots are skipped.
119d62007edSSean Christopherson  *
120d62007edSSean Christopherson  * Returns NULL if the end of tdp_mmu_roots was reached.
121cfc10997SBen Gardon  */
122cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
1236103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
124d62007edSSean Christopherson 					      bool shared, bool only_valid)
125a889ea54SBen Gardon {
126a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
127a889ea54SBen Gardon 
128c0e64238SBen Gardon 	rcu_read_lock();
129c0e64238SBen Gardon 
130cfc10997SBen Gardon 	if (prev_root)
131c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132c0e64238SBen Gardon 						  &prev_root->link,
133c0e64238SBen Gardon 						  typeof(*prev_root), link);
134cfc10997SBen Gardon 	else
135c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
136cfc10997SBen Gardon 						   typeof(*next_root), link);
137cfc10997SBen Gardon 
13804dc4e6cSSean Christopherson 	while (next_root) {
139d62007edSSean Christopherson 		if ((!only_valid || !next_root->role.invalid) &&
140ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(next_root))
14104dc4e6cSSean Christopherson 			break;
14204dc4e6cSSean Christopherson 
143c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
144c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
14504dc4e6cSSean Christopherson 	}
146fb101293SBen Gardon 
147c0e64238SBen Gardon 	rcu_read_unlock();
148cfc10997SBen Gardon 
149cfc10997SBen Gardon 	if (prev_root)
1506103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
151cfc10997SBen Gardon 
152a889ea54SBen Gardon 	return next_root;
153a889ea54SBen Gardon }
154a889ea54SBen Gardon 
155a889ea54SBen Gardon /*
156a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
157a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
158a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
159a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
1606103bc07SBen Gardon  *
1616103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
1626103bc07SBen Gardon  * mode. In the unlikely event that this thread must free a root, the lock
1636103bc07SBen Gardon  * will be temporarily dropped and reacquired in write mode.
164a889ea54SBen Gardon  */
165d62007edSSean Christopherson #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
166d62007edSSean Christopherson 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
167cfc10997SBen Gardon 	     _root;								\
168d62007edSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
169*614f6970SPaolo Bonzini 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
170*614f6970SPaolo Bonzini 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
171a3f15bdaSSean Christopherson 		} else
172a889ea54SBen Gardon 
173d62007edSSean Christopherson #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
174d62007edSSean Christopherson 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
175d62007edSSean Christopherson 
176*614f6970SPaolo Bonzini #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
177*614f6970SPaolo Bonzini 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
178d62007edSSean Christopherson 
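/*
 * Example usage of the yield-safe iterator, mirroring
 * __kvm_tdp_mmu_zap_gfn_range() below:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
 *				      false);
 */
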
179226b8c8fSSean Christopherson /*
180226b8c8fSSean Christopherson  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
181226b8c8fSSean Christopherson  * the implication being that any flow that holds mmu_lock for read is
182226b8c8fSSean Christopherson  * inherently yield-friendly and should use the yield-safe variant above.
183226b8c8fSSean Christopherson  * Holding mmu_lock for write obviates the need for RCU protection as the list
184226b8c8fSSean Christopherson  * is guaranteed to be stable.
185226b8c8fSSean Christopherson  */
186a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
187226b8c8fSSean Christopherson 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
188226b8c8fSSean Christopherson 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
189226b8c8fSSean Christopherson 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
190a3f15bdaSSean Christopherson 		} else
19102c00b3aSBen Gardon 
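/*
 * Allocate a shadow page and its page table from the vCPU's MMU memory
 * caches.  The new page is not yet linked into the paging structure.
 */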
192a82070b6SDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
19302c00b3aSBen Gardon {
19402c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
19502c00b3aSBen Gardon 
19602c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
19702c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
198a82070b6SDavid Matlack 
199a82070b6SDavid Matlack 	return sp;
200a82070b6SDavid Matlack }
201a82070b6SDavid Matlack 
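/*
 * Initialize a freshly allocated shadow page: stash the struct kvm_mmu_page
 * pointer in the backing page's private field and record the page's role and
 * base GFN.
 */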
202a82070b6SDavid Matlack static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, gfn_t gfn,
203a82070b6SDavid Matlack 			      union kvm_mmu_page_role role)
204a82070b6SDavid Matlack {
20502c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
20602c00b3aSBen Gardon 
207a3aca4deSDavid Matlack 	sp->role = role;
20802c00b3aSBen Gardon 	sp->gfn = gfn;
20902c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
21002c00b3aSBen Gardon 
21133dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
21202c00b3aSBen Gardon }
21302c00b3aSBen Gardon 
214a82070b6SDavid Matlack static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
215a3aca4deSDavid Matlack 				  struct tdp_iter *iter)
216a3aca4deSDavid Matlack {
217a3aca4deSDavid Matlack 	struct kvm_mmu_page *parent_sp;
218a3aca4deSDavid Matlack 	union kvm_mmu_page_role role;
219a3aca4deSDavid Matlack 
220a3aca4deSDavid Matlack 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
221a3aca4deSDavid Matlack 
222a3aca4deSDavid Matlack 	role = parent_sp->role;
223a3aca4deSDavid Matlack 	role.level--;
224a3aca4deSDavid Matlack 
225a82070b6SDavid Matlack 	tdp_mmu_init_sp(child_sp, iter->gfn, role);
226a3aca4deSDavid Matlack }
227a3aca4deSDavid Matlack 
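/*
 * Get the TDP MMU root for the vCPU's current MMU role and return its
 * physical address.  An existing root with a matching role is reused;
 * otherwise a new root is allocated and published on tdp_mmu_roots.
 */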
2286e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
22902c00b3aSBen Gardon {
230a3aca4deSDavid Matlack 	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
23102c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
23202c00b3aSBen Gardon 	struct kvm_mmu_page *root;
23302c00b3aSBen Gardon 
2346e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
23502c00b3aSBen Gardon 
23604dc4e6cSSean Christopherson 	/*
23704dc4e6cSSean Christopherson 	 * Check for an existing root before allocating a new one.  Note, the
23804dc4e6cSSean Christopherson 	 * role check prevents consuming an invalid root.
23904dc4e6cSSean Christopherson 	 */
240a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
241fb101293SBen Gardon 		if (root->role.word == role.word &&
242ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(root))
2436e6ec584SSean Christopherson 			goto out;
24402c00b3aSBen Gardon 	}
24502c00b3aSBen Gardon 
246a82070b6SDavid Matlack 	root = tdp_mmu_alloc_sp(vcpu);
247a82070b6SDavid Matlack 	tdp_mmu_init_sp(root, 0, role);
248a82070b6SDavid Matlack 
24911cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
25002c00b3aSBen Gardon 
251c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
252c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
253c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
25402c00b3aSBen Gardon 
2556e6ec584SSean Christopherson out:
25602c00b3aSBen Gardon 	return __pa(root->spt);
257fe5db27dSBen Gardon }
2582f2fad08SBen Gardon 
2592f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
2609a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
2619a77daacSBen Gardon 				bool shared);
2622f2fad08SBen Gardon 
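/*
 * Propagate the accessed bit to the primary MM: if a previously accessed leaf
 * SPTE is zapped, loses its accessed bit, or changes PFN, mark the old PFN as
 * accessed via kvm_set_pfn_accessed().
 */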
263f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
264f8e14497SBen Gardon {
265f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
266f8e14497SBen Gardon 		return;
267f8e14497SBen Gardon 
268f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
26964bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
27064bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
271f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
272f8e14497SBen Gardon }
273f8e14497SBen Gardon 
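/*
 * Update the dirty bitmap: if a 4K SPTE becomes writable, or is writable and
 * now maps a different PFN, mark the GFN dirty in its memslot.
 */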
274a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
275a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
276a6a0b05dSBen Gardon {
277a6a0b05dSBen Gardon 	bool pfn_changed;
278a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
279a6a0b05dSBen Gardon 
280a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
281a6a0b05dSBen Gardon 		return;
282a6a0b05dSBen Gardon 
283a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
284a6a0b05dSBen Gardon 
285a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
286a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
287a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
288fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
289a6a0b05dSBen Gardon 	}
290a6a0b05dSBen Gardon }
291a6a0b05dSBen Gardon 
2922f2fad08SBen Gardon /**
293c298a30cSDavid Matlack  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
294a9442f59SBen Gardon  *
295a9442f59SBen Gardon  * @kvm: kvm instance
296a9442f59SBen Gardon  * @sp: the page to be removed
2979a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2989a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2999a77daacSBen Gardon  *	    threads that might be adding or removing pages.
300a9442f59SBen Gardon  */
301c298a30cSDavid Matlack static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
3029a77daacSBen Gardon 			      bool shared)
303a9442f59SBen Gardon {
3049a77daacSBen Gardon 	if (shared)
3059a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
3069a77daacSBen Gardon 	else
307a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
308a9442f59SBen Gardon 
309a9442f59SBen Gardon 	list_del(&sp->link);
310a9442f59SBen Gardon 	if (sp->lpage_disallowed)
311a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
3129a77daacSBen Gardon 
3139a77daacSBen Gardon 	if (shared)
3149a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
315a9442f59SBen Gardon }
316a9442f59SBen Gardon 
317a9442f59SBen Gardon /**
3180f53dfa3SDavid Matlack  * handle_removed_pt() - handle a page table removed from the TDP structure
319a066e61fSBen Gardon  *
320a066e61fSBen Gardon  * @kvm: kvm instance
321a066e61fSBen Gardon  * @pt: the page removed from the paging structure
3229a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
3239a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
3249a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
325a066e61fSBen Gardon  *
326a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
327a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
32870fb3e41SBen Gardon  *
32970fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
33070fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
33170fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
33270fb3e41SBen Gardon  * early rcu_dereferences in the function.
333a066e61fSBen Gardon  */
3340f53dfa3SDavid Matlack static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
335a066e61fSBen Gardon {
33670fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
337a066e61fSBen Gardon 	int level = sp->role.level;
338e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
339a066e61fSBen Gardon 	int i;
340a066e61fSBen Gardon 
341a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
342a066e61fSBen Gardon 
343c298a30cSDavid Matlack 	tdp_mmu_unlink_sp(kvm, sp, shared);
344a066e61fSBen Gardon 
345a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
346574c3c55SBen Gardon 		u64 *sptep = rcu_dereference(pt) + i;
347574c3c55SBen Gardon 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
348574c3c55SBen Gardon 		u64 old_child_spte;
3499a77daacSBen Gardon 
3509a77daacSBen Gardon 		if (shared) {
351e25f0e0cSBen Gardon 			/*
352e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
353e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
354e25f0e0cSBen Gardon 			 * already marked as removed then another thread
355e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
356e25f0e0cSBen Gardon 			 * keep retrying the exchange until the value read
357e25f0e0cSBen Gardon 			 * back is something other than the removed SPTE.
358e25f0e0cSBen Gardon 			 */
359e25f0e0cSBen Gardon 			for (;;) {
360e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
361e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
362e25f0e0cSBen Gardon 					break;
363e25f0e0cSBen Gardon 				cpu_relax();
364e25f0e0cSBen Gardon 			}
3659a77daacSBen Gardon 		} else {
3668df9f1afSSean Christopherson 			/*
3678df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3688df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3698df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3708df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3718df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3728df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3738df9f1afSSean Christopherson 			 * unreachable.
3748df9f1afSSean Christopherson 			 */
3759a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3768df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3778df9f1afSSean Christopherson 				continue;
378e25f0e0cSBen Gardon 
379e25f0e0cSBen Gardon 			/*
380e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
381e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
382e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
383e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
384e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
385e25f0e0cSBen Gardon 			 * the function.
386e25f0e0cSBen Gardon 			 */
387e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3889a77daacSBen Gardon 		}
389e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
390f1b83255SKai Huang 				    old_child_spte, REMOVED_SPTE, level,
391e25f0e0cSBen Gardon 				    shared);
392a066e61fSBen Gardon 	}
393a066e61fSBen Gardon 
394574c3c55SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
395f1b83255SKai Huang 					   KVM_PAGES_PER_HPAGE(level + 1));
396a066e61fSBen Gardon 
3977cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
398a066e61fSBen Gardon }
399a066e61fSBen Gardon 
400a066e61fSBen Gardon /**
4017f6231a3SKai Huang  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
4022f2fad08SBen Gardon  * @kvm: kvm instance
4032f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
4042f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
4052f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
4062f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
4072f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
4089a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
4099a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
4109a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
4112f2fad08SBen Gardon  *
4122f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
4132f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
4142f2fad08SBen Gardon  */
4152f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4169a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
4179a77daacSBen Gardon 				  bool shared)
4182f2fad08SBen Gardon {
4192f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
4202f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
4212f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
4222f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
4232f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
4242f2fad08SBen Gardon 
4252f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
4262f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
427764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
4282f2fad08SBen Gardon 
4292f2fad08SBen Gardon 	/*
4302f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
4312f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
4322f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
433d9f6e12fSIngo Molnar 	 * present leaf SPTE pointing to a different PFN. A notifier handler
4342f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
4352f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
4362f2fad08SBen Gardon 	 * thread before replacement.
4372f2fad08SBen Gardon 	 */
4382f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
4392f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4402f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4412f2fad08SBen Gardon 		       "different PFN!\n"
4422f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4432f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4442f2fad08SBen Gardon 
4452f2fad08SBen Gardon 		/*
4462f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
447d9f6e12fSIngo Molnar 		 * corruption.
4482f2fad08SBen Gardon 		 */
4492f2fad08SBen Gardon 		BUG();
4502f2fad08SBen Gardon 	}
4512f2fad08SBen Gardon 
4522f2fad08SBen Gardon 	if (old_spte == new_spte)
4532f2fad08SBen Gardon 		return;
4542f2fad08SBen Gardon 
455b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
456b9a98c34SBen Gardon 
457115111efSDavid Matlack 	if (is_leaf)
458115111efSDavid Matlack 		check_spte_writable_invariants(new_spte);
459115111efSDavid Matlack 
4602f2fad08SBen Gardon 	/*
4612f2fad08SBen Gardon 	 * The only times a SPTE should be changed from a non-present to
4622f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4632f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4642f2fad08SBen Gardon 	 */
4652f2fad08SBen Gardon 	if (!was_present && !is_present) {
4662f2fad08SBen Gardon 		/*
46708f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
46808f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
46908f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
47008f07c80SBen Gardon 		 * are nonpresent.
4712f2fad08SBen Gardon 		 */
47208f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
47308f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
47408f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4752f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4762f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4772f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
47808f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
47908f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4802f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4812f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4822f2fad08SBen Gardon 		return;
4832f2fad08SBen Gardon 	}
4842f2fad08SBen Gardon 
48571f51d2cSMingwei Zhang 	if (is_leaf != was_leaf)
48671f51d2cSMingwei Zhang 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
4872f2fad08SBen Gardon 
4882f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
48964bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4902f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4912f2fad08SBen Gardon 
4922f2fad08SBen Gardon 	/*
4932f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4942f2fad08SBen Gardon 	 * the paging structure.
4952f2fad08SBen Gardon 	 */
496a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
4970f53dfa3SDavid Matlack 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
4982f2fad08SBen Gardon }
4992f2fad08SBen Gardon 
5002f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
5019a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
5029a77daacSBen Gardon 				bool shared)
5032f2fad08SBen Gardon {
5049a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
5059a77daacSBen Gardon 			      shared);
506f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
507a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
508a6a0b05dSBen Gardon 				      new_spte, level);
5092f2fad08SBen Gardon }
510faaf05b0SBen Gardon 
511fe43fa2fSBen Gardon /*
5126ccf4438SPaolo Bonzini  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
5136ccf4438SPaolo Bonzini  * and handle the associated bookkeeping.  Do not mark the page dirty
51424ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
5159a77daacSBen Gardon  *
5163255530aSDavid Matlack  * If setting the SPTE fails because it has changed, iter->old_spte will be
5173255530aSDavid Matlack  * refreshed to the current value of the spte.
5183255530aSDavid Matlack  *
5199a77daacSBen Gardon  * @kvm: kvm instance
5209a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
5219a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
5223e72c791SDavid Matlack  * Return:
5233e72c791SDavid Matlack  * * 0      - If the SPTE was set.
5243e72c791SDavid Matlack  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
5253e72c791SDavid Matlack  *            no side-effects other than setting iter->old_spte to the last
5263e72c791SDavid Matlack  *            known value of the spte.
5279a77daacSBen Gardon  */
5283e72c791SDavid Matlack static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
5299a77daacSBen Gardon 					  struct tdp_iter *iter,
5309a77daacSBen Gardon 					  u64 new_spte)
5319a77daacSBen Gardon {
5323255530aSDavid Matlack 	u64 *sptep = rcu_dereference(iter->sptep);
5333255530aSDavid Matlack 	u64 old_spte;
5343255530aSDavid Matlack 
5353a0f64deSSean Christopherson 	WARN_ON_ONCE(iter->yielded);
5363a0f64deSSean Christopherson 
5379a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
5389a77daacSBen Gardon 
53908f07c80SBen Gardon 	/*
54008f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
54108f07c80SBen Gardon 	 * may modify it.
54208f07c80SBen Gardon 	 */
5437a51393aSSean Christopherson 	if (is_removed_spte(iter->old_spte))
5443e72c791SDavid Matlack 		return -EBUSY;
54508f07c80SBen Gardon 
5466e8eb206SDavid Matlack 	/*
5476e8eb206SDavid Matlack 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
5486e8eb206SDavid Matlack 	 * does not hold the mmu_lock.
5496e8eb206SDavid Matlack 	 */
5503255530aSDavid Matlack 	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
5513255530aSDavid Matlack 	if (old_spte != iter->old_spte) {
5523255530aSDavid Matlack 		/*
5533255530aSDavid Matlack 		 * The page table entry was modified by a different logical
5543255530aSDavid Matlack 		 * CPU. Refresh iter->old_spte with the current value so the
5553255530aSDavid Matlack 		 * caller operates on fresh data, e.g. if it retries
5563255530aSDavid Matlack 		 * tdp_mmu_set_spte_atomic().
5573255530aSDavid Matlack 		 */
5583255530aSDavid Matlack 		iter->old_spte = old_spte;
5593e72c791SDavid Matlack 		return -EBUSY;
5603255530aSDavid Matlack 	}
5619a77daacSBen Gardon 
56224ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
56308889894SSean Christopherson 			      new_spte, iter->level, true);
56424ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
5659a77daacSBen Gardon 
5663e72c791SDavid Matlack 	return 0;
5679a77daacSBen Gardon }
5689a77daacSBen Gardon 
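/*
 * Zap a SPTE while holding the MMU lock in read mode: freeze the SPTE with
 * the special removed value, flush TLBs for the affected range, then clear
 * it.  Returns 0 on success, or -EBUSY if the SPTE changed underneath us.
 */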
5693e72c791SDavid Matlack static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
57008f07c80SBen Gardon 					  struct tdp_iter *iter)
57108f07c80SBen Gardon {
5723e72c791SDavid Matlack 	int ret;
5733e72c791SDavid Matlack 
57408f07c80SBen Gardon 	/*
57508f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
57608f07c80SBen Gardon 	 * non-present value. This will stop other threads from
57708f07c80SBen Gardon 	 * immediately installing a present entry in its place
57808f07c80SBen Gardon 	 * before the TLBs are flushed.
57908f07c80SBen Gardon 	 */
5803e72c791SDavid Matlack 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
5813e72c791SDavid Matlack 	if (ret)
5823e72c791SDavid Matlack 		return ret;
58308f07c80SBen Gardon 
58408f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
58508f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
58608f07c80SBen Gardon 
58708f07c80SBen Gardon 	/*
58808f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
58908f07c80SBen Gardon 	 * must either wait on the MMU lock or use
590d9f6e12fSIngo Molnar 	 * tdp_mmu_set_spte_atomic which will not overwrite the
59108f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
59208f07c80SBen Gardon 	 * here since the SPTE is going from non-present
59308f07c80SBen Gardon 	 * to non-present.
59408f07c80SBen Gardon 	 */
59514f6fec2SBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
59608f07c80SBen Gardon 
5973e72c791SDavid Matlack 	return 0;
59808f07c80SBen Gardon }
59908f07c80SBen Gardon 
6009a77daacSBen Gardon 
6019a77daacSBen Gardon /*
602fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
603fe43fa2fSBen Gardon  * @kvm: kvm instance
604fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
605fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
606fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
607fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
608fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
609fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
610fe43fa2fSBen Gardon  *		      double counted.
611fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
612fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
613fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
614fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
615fe43fa2fSBen Gardon  *		      writes from being double counted.
616fe43fa2fSBen Gardon  */
617f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
618a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
619a6a0b05dSBen Gardon 				      bool record_dirty_log)
620faaf05b0SBen Gardon {
6213a0f64deSSean Christopherson 	WARN_ON_ONCE(iter->yielded);
6223a0f64deSSean Christopherson 
623531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
6243a9a4aa5SBen Gardon 
62508f07c80SBen Gardon 	/*
62608f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
62708f07c80SBen Gardon 	 * temporary removed SPTE value.
62808f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
62908f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
63008f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
63108f07c80SBen Gardon 	 */
6327a51393aSSean Christopherson 	WARN_ON(is_removed_spte(iter->old_spte));
63308f07c80SBen Gardon 
6347cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
635faaf05b0SBen Gardon 
63608889894SSean Christopherson 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
63708889894SSean Christopherson 			      new_spte, iter->level, false);
638f8e14497SBen Gardon 	if (record_acc_track)
639f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
640f8e14497SBen Gardon 					      iter->level);
641a6a0b05dSBen Gardon 	if (record_dirty_log)
64208889894SSean Christopherson 		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
643a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
644a6a0b05dSBen Gardon 					      iter->level);
645f8e14497SBen Gardon }
646f8e14497SBen Gardon 
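/*
 * Thin wrappers around __tdp_mmu_set_spte() that select whether accessed
 * and/or dirty-log bookkeeping is recorded, as described in the comment
 * above __tdp_mmu_set_spte().
 */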
647f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
648f8e14497SBen Gardon 				    u64 new_spte)
649f8e14497SBen Gardon {
650a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
651f8e14497SBen Gardon }
652f8e14497SBen Gardon 
653f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
654f8e14497SBen Gardon 						 struct tdp_iter *iter,
655f8e14497SBen Gardon 						 u64 new_spte)
656f8e14497SBen Gardon {
657a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
658a6a0b05dSBen Gardon }
659a6a0b05dSBen Gardon 
660a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
661a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
662a6a0b05dSBen Gardon 						 u64 new_spte)
663a6a0b05dSBen Gardon {
664a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
665faaf05b0SBen Gardon }
666faaf05b0SBen Gardon 
667faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
66877aa6075SDavid Matlack 	for_each_tdp_pte(_iter, _root, _start, _end)
669faaf05b0SBen Gardon 
670f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
671f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
672f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
673f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
674f8e14497SBen Gardon 			continue;					\
675f8e14497SBen Gardon 		else
676f8e14497SBen Gardon 
677bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
678b9e5603cSPaolo Bonzini 	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
679bb18842eSBen Gardon 
680faaf05b0SBen Gardon /*
681e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
682e28a436cSBen Gardon  * to the scheduler.
683e28a436cSBen Gardon  *
684e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
685e139a34eSBen Gardon  * TLB flush before yielding.
686e139a34eSBen Gardon  *
6873a0f64deSSean Christopherson  * If this function yields, iter->yielded is set and the caller must skip to
6883a0f64deSSean Christopherson  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
6893a0f64deSSean Christopherson  * over the paging structures to allow the iterator to continue its traversal
6903a0f64deSSean Christopherson  * from the paging structure root.
691e28a436cSBen Gardon  *
6923a0f64deSSean Christopherson  * Returns true if this function yielded.
693e28a436cSBen Gardon  */
6943a0f64deSSean Christopherson static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
6953a0f64deSSean Christopherson 							  struct tdp_iter *iter,
6963a0f64deSSean Christopherson 							  bool flush, bool shared)
697a6a0b05dSBen Gardon {
6983a0f64deSSean Christopherson 	WARN_ON(iter->yielded);
6993a0f64deSSean Christopherson 
700ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
701ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
702ed5e484bSBen Gardon 		return false;
703ed5e484bSBen Gardon 
704531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7057cca2d0bSBen Gardon 		rcu_read_unlock();
7067cca2d0bSBen Gardon 
707e139a34eSBen Gardon 		if (flush)
708e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
709e139a34eSBen Gardon 
7106103bc07SBen Gardon 		if (shared)
7116103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
7126103bc07SBen Gardon 		else
713531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
7146103bc07SBen Gardon 
7157cca2d0bSBen Gardon 		rcu_read_lock();
716ed5e484bSBen Gardon 
717ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
718ed5e484bSBen Gardon 
7193a0f64deSSean Christopherson 		iter->yielded = true;
720a6a0b05dSBen Gardon 	}
721e28a436cSBen Gardon 
7223a0f64deSSean Christopherson 	return iter->yielded;
723a6a0b05dSBen Gardon }
724a6a0b05dSBen Gardon 
725faaf05b0SBen Gardon /*
726faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
727faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
728faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
729faaf05b0SBen Gardon  * MMU lock.
7306103bc07SBen Gardon  *
731063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
732063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
733063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
734063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
7356103bc07SBen Gardon  * operation can cause a soft lockup.
7366103bc07SBen Gardon  *
7376103bc07SBen Gardon  * If shared is true, this thread holds the MMU lock in read mode and must
7386103bc07SBen Gardon  * account for the possibility that other threads are modifying the paging
7396103bc07SBen Gardon  * structures concurrently. If shared is false, this thread should hold the
7406103bc07SBen Gardon  * MMU lock in write mode.
741faaf05b0SBen Gardon  */
742faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
7436103bc07SBen Gardon 			  gfn_t start, gfn_t end, bool can_yield, bool flush,
7446103bc07SBen Gardon 			  bool shared)
745faaf05b0SBen Gardon {
746524a1e4eSSean Christopherson 	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
747524a1e4eSSean Christopherson 	bool zap_all = (start == 0 && end >= max_gfn_host);
748faaf05b0SBen Gardon 	struct tdp_iter iter;
749faaf05b0SBen Gardon 
750524a1e4eSSean Christopherson 	/*
7510103098fSSean Christopherson 	 * No need to try to step down in the iterator when zapping all SPTEs,
7520103098fSSean Christopherson 	 * zapping the top-level non-leaf SPTEs will recurse on their children.
7530103098fSSean Christopherson 	 */
7540103098fSSean Christopherson 	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
7550103098fSSean Christopherson 
7560103098fSSean Christopherson 	/*
757524a1e4eSSean Christopherson 	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
758524a1e4eSSean Christopherson 	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
759524a1e4eSSean Christopherson 	 * and so KVM will never install a SPTE for such addresses.
760524a1e4eSSean Christopherson 	 */
761524a1e4eSSean Christopherson 	end = min(end, max_gfn_host);
762524a1e4eSSean Christopherson 
7636103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
7646103bc07SBen Gardon 
7657cca2d0bSBen Gardon 	rcu_read_lock();
7667cca2d0bSBen Gardon 
76777aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
7686103bc07SBen Gardon retry:
7691af4a960SBen Gardon 		if (can_yield &&
7706103bc07SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
771a835429cSSean Christopherson 			flush = false;
7721af4a960SBen Gardon 			continue;
7731af4a960SBen Gardon 		}
7741af4a960SBen Gardon 
775faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
776faaf05b0SBen Gardon 			continue;
777faaf05b0SBen Gardon 
778faaf05b0SBen Gardon 		/*
779faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
780faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
781524a1e4eSSean Christopherson 		 * lower level, except when zapping all SPTEs.
782faaf05b0SBen Gardon 		 */
783524a1e4eSSean Christopherson 		if (!zap_all &&
784524a1e4eSSean Christopherson 		    (iter.gfn < start ||
785faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
786faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
787faaf05b0SBen Gardon 			continue;
788faaf05b0SBen Gardon 
7896103bc07SBen Gardon 		if (!shared) {
790faaf05b0SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, 0);
791a835429cSSean Christopherson 			flush = true;
7923e72c791SDavid Matlack 		} else if (tdp_mmu_zap_spte_atomic(kvm, &iter)) {
7936103bc07SBen Gardon 			goto retry;
7946103bc07SBen Gardon 		}
795faaf05b0SBen Gardon 	}
7967cca2d0bSBen Gardon 
7977cca2d0bSBen Gardon 	rcu_read_unlock();
798a835429cSSean Christopherson 	return flush;
799faaf05b0SBen Gardon }
800faaf05b0SBen Gardon 
801faaf05b0SBen Gardon /*
802faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
803faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
804faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
805faaf05b0SBen Gardon  * MMU lock.
806faaf05b0SBen Gardon  */
8072b9663d8SSean Christopherson bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
8085a324c24SSean Christopherson 				 gfn_t end, bool can_yield, bool flush)
809faaf05b0SBen Gardon {
810faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
811faaf05b0SBen Gardon 
812*614f6970SPaolo Bonzini 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
8136103bc07SBen Gardon 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
8145a324c24SSean Christopherson 				      false);
815faaf05b0SBen Gardon 
816faaf05b0SBen Gardon 	return flush;
817faaf05b0SBen Gardon }
818faaf05b0SBen Gardon 
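/* Zap all SPTEs in every address space, flushing TLBs if anything was zapped. */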
819faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
820faaf05b0SBen Gardon {
8212b9663d8SSean Christopherson 	bool flush = false;
8222b9663d8SSean Christopherson 	int i;
823faaf05b0SBen Gardon 
8242b9663d8SSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
8255a324c24SSean Christopherson 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);
8262b9663d8SSean Christopherson 
827faaf05b0SBen Gardon 	if (flush)
828faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
829faaf05b0SBen Gardon }
830bb18842eSBen Gardon 
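/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL) that is invalid and still has a non-zero reference count, i.e. a root
 * to which kvm_tdp_mmu_invalidate_all_roots() gifted a reference.
 */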
8314c6654bdSBen Gardon static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
8324c6654bdSBen Gardon 						  struct kvm_mmu_page *prev_root)
8334c6654bdSBen Gardon {
8344c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8354c6654bdSBen Gardon 
8364c6654bdSBen Gardon 	if (prev_root)
8374c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8384c6654bdSBen Gardon 						  &prev_root->link,
8394c6654bdSBen Gardon 						  typeof(*prev_root), link);
8404c6654bdSBen Gardon 	else
8414c6654bdSBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8424c6654bdSBen Gardon 						   typeof(*next_root), link);
8434c6654bdSBen Gardon 
8444c6654bdSBen Gardon 	while (next_root && !(next_root->role.invalid &&
8454c6654bdSBen Gardon 			      refcount_read(&next_root->tdp_mmu_root_count)))
8464c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
8474c6654bdSBen Gardon 						  &next_root->link,
8484c6654bdSBen Gardon 						  typeof(*next_root), link);
8494c6654bdSBen Gardon 
8504c6654bdSBen Gardon 	return next_root;
8514c6654bdSBen Gardon }
8524c6654bdSBen Gardon 
8534c6654bdSBen Gardon /*
854f28e9c7fSSean Christopherson  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
855f28e9c7fSSean Christopherson  * zap" completes.  Since kvm_tdp_mmu_invalidate_all_roots() has acquired a
856f28e9c7fSSean Christopherson  * reference to each invalidated root, roots will not be freed until after this
857f28e9c7fSSean Christopherson  * function drops the gifted reference, e.g. so that vCPUs don't get stuck with
858f28e9c7fSSean Christopherson  * tearing down paging structures.
8594c6654bdSBen Gardon  */
8604c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
8614c6654bdSBen Gardon {
8624c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
8634c6654bdSBen Gardon 	struct kvm_mmu_page *root;
8644c6654bdSBen Gardon 
8654c6654bdSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
8664c6654bdSBen Gardon 
8674c6654bdSBen Gardon 	rcu_read_lock();
8684c6654bdSBen Gardon 
8694c6654bdSBen Gardon 	root = next_invalidated_root(kvm, NULL);
8704c6654bdSBen Gardon 
8714c6654bdSBen Gardon 	while (root) {
8724c6654bdSBen Gardon 		next_root = next_invalidated_root(kvm, root);
8734c6654bdSBen Gardon 
8744c6654bdSBen Gardon 		rcu_read_unlock();
8754c6654bdSBen Gardon 
8767ae5840eSSean Christopherson 		/*
8777ae5840eSSean Christopherson 		 * A TLB flush is unnecessary, invalidated roots are guaranteed
8787ae5840eSSean Christopherson 		 * to be unreachable by the guest (see kvm_tdp_mmu_put_root()
8797ae5840eSSean Christopherson 		 * for more details), and unlike the legacy MMU, no vCPU kick
8807ae5840eSSean Christopherson 		 * is needed to play nice with lockless shadow walks as the TDP
8817ae5840eSSean Christopherson 		 * MMU protects its paging structures via RCU.  Note, zapping
8827ae5840eSSean Christopherson 		 * will still flush on yield, but that's a minor performance
8837ae5840eSSean Christopherson 		 * blip and not a functional issue.
8847ae5840eSSean Christopherson 		 */
8857ae5840eSSean Christopherson 		(void)zap_gfn_range(kvm, root, 0, -1ull, true, false, true);
8864c6654bdSBen Gardon 
8874c6654bdSBen Gardon 		/*
8884c6654bdSBen Gardon 		 * Put the reference acquired in
8894c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_all_roots().
8904c6654bdSBen Gardon 		 */
8914c6654bdSBen Gardon 		kvm_tdp_mmu_put_root(kvm, root, true);
8924c6654bdSBen Gardon 
8934c6654bdSBen Gardon 		root = next_root;
8944c6654bdSBen Gardon 
8954c6654bdSBen Gardon 		rcu_read_lock();
8964c6654bdSBen Gardon 	}
8974c6654bdSBen Gardon 
8984c6654bdSBen Gardon 	rcu_read_unlock();
8994c6654bdSBen Gardon }
9004c6654bdSBen Gardon 
901bb18842eSBen Gardon /*
902f28e9c7fSSean Christopherson  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
903f28e9c7fSSean Christopherson  * is about to be zapped, e.g. in response to a memslots update.  The caller is
904f28e9c7fSSean Christopherson  * responsible for invoking kvm_tdp_mmu_zap_invalidated_roots() to do the actual
905f28e9c7fSSean Christopherson  * zapping.
906b7cccd39SBen Gardon  *
907f28e9c7fSSean Christopherson  * Take a reference on all roots to prevent the root from being freed before it
908f28e9c7fSSean Christopherson  * is zapped by this thread.  Freeing a root is not a correctness issue, but if
909f28e9c7fSSean Christopherson  * a vCPU drops the last reference to a root prior to the root being zapped, it
910f28e9c7fSSean Christopherson  * will get stuck with tearing down the entire paging structure.
9114c6654bdSBen Gardon  *
912f28e9c7fSSean Christopherson  * Get a reference even if the root is already invalid,
913f28e9c7fSSean Christopherson  * kvm_tdp_mmu_zap_invalidated_roots() assumes it was gifted a reference to all
914f28e9c7fSSean Christopherson  * invalid roots, e.g. there's no epoch to identify roots that were invalidated
915f28e9c7fSSean Christopherson  * by a previous call.  Roots stay on the list until the last reference is
916f28e9c7fSSean Christopherson  * dropped, so even though all invalid roots are zapped, a root may not go away
917f28e9c7fSSean Christopherson  * for quite some time, e.g. if a vCPU blocks across multiple memslot updates.
918f28e9c7fSSean Christopherson  *
919f28e9c7fSSean Christopherson  * Because mmu_lock is held for write, it should be impossible to observe a
920f28e9c7fSSean Christopherson  * root with zero refcount, i.e. the list of roots cannot be stale.
9214c6654bdSBen Gardon  *
922b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
923b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
924b7cccd39SBen Gardon  */
925b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
926b7cccd39SBen Gardon {
927b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
928b7cccd39SBen Gardon 
929b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
930f28e9c7fSSean Christopherson 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
931f28e9c7fSSean Christopherson 		if (!WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
932b7cccd39SBen Gardon 			root->role.invalid = true;
933b7cccd39SBen Gardon 	}
934f28e9c7fSSean Christopherson }
935b7cccd39SBen Gardon 
936bb18842eSBen Gardon /*
937bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
938bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
939bb18842eSBen Gardon  */
940cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
941cdc47767SPaolo Bonzini 					  struct kvm_page_fault *fault,
942cdc47767SPaolo Bonzini 					  struct tdp_iter *iter)
943bb18842eSBen Gardon {
944c435d4b7SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
945bb18842eSBen Gardon 	u64 new_spte;
94657a3e96dSKai Huang 	int ret = RET_PF_FIXED;
947ad67e480SPaolo Bonzini 	bool wrprot = false;
948bb18842eSBen Gardon 
9497158bee4SPaolo Bonzini 	WARN_ON(sp->role.level != fault->goal_level);
950e710c5f6SDavid Matlack 	if (unlikely(!fault->slot))
951bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
9529a77daacSBen Gardon 	else
95353597858SDavid Matlack 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
9542839180cSPaolo Bonzini 					 fault->pfn, iter->old_spte, fault->prefetch, true,
9557158bee4SPaolo Bonzini 					 fault->map_writable, &new_spte);
956bb18842eSBen Gardon 
957bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
958bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
9593e72c791SDavid Matlack 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
9609a77daacSBen Gardon 		return RET_PF_RETRY;
961bb18842eSBen Gardon 
962bb18842eSBen Gardon 	/*
963bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
964bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
965bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
966bb18842eSBen Gardon 	 */
967ad67e480SPaolo Bonzini 	if (wrprot) {
968cdc47767SPaolo Bonzini 		if (fault->write)
969bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
970bb18842eSBen Gardon 	}
971bb18842eSBen Gardon 
972bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
9739a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
9749a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
9759a77daacSBen Gardon 				     new_spte);
976bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
9773849e092SSean Christopherson 	} else {
9789a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
9799a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
9803849e092SSean Christopherson 	}
981bb18842eSBen Gardon 
982857f8474SKai Huang 	/*
983857f8474SKai Huang 	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
984857f8474SKai Huang 	 * consistent with legacy MMU behavior.
985857f8474SKai Huang 	 */
986857f8474SKai Huang 	if (ret != RET_PF_SPURIOUS)
987bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
988bb18842eSBen Gardon 
989bb18842eSBen Gardon 	return ret;
990bb18842eSBen Gardon }
991bb18842eSBen Gardon 
992bb18842eSBen Gardon /*
993cb00a70bSDavid Matlack  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
994cb00a70bSDavid Matlack  * provided page table.
9957b7e1ab6SDavid Matlack  *
9967b7e1ab6SDavid Matlack  * @kvm: kvm instance
9977b7e1ab6SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
9987b7e1ab6SDavid Matlack  * @sp: The new TDP page table to install.
9997b7e1ab6SDavid Matlack  * @account_nx: True if this page table is being installed to split a
10007b7e1ab6SDavid Matlack  *              non-executable huge page.
1001cb00a70bSDavid Matlack  * @shared: This operation is running under the MMU lock in read mode.
10027b7e1ab6SDavid Matlack  *
10037b7e1ab6SDavid Matlack  * Returns: 0 if the new page table was installed. Non-0 if the page table
10047b7e1ab6SDavid Matlack  *          could not be installed (e.g. the atomic compare-exchange failed).
10057b7e1ab6SDavid Matlack  */
1006cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1007cb00a70bSDavid Matlack 			   struct kvm_mmu_page *sp, bool account_nx,
1008cb00a70bSDavid Matlack 			   bool shared)
10097b7e1ab6SDavid Matlack {
10107b7e1ab6SDavid Matlack 	u64 spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask);
1011cb00a70bSDavid Matlack 	int ret = 0;
10127b7e1ab6SDavid Matlack 
1013cb00a70bSDavid Matlack 	if (shared) {
10147b7e1ab6SDavid Matlack 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
10157b7e1ab6SDavid Matlack 		if (ret)
10167b7e1ab6SDavid Matlack 			return ret;
1017cb00a70bSDavid Matlack 	} else {
1018cb00a70bSDavid Matlack 		tdp_mmu_set_spte(kvm, iter, spte);
1019cb00a70bSDavid Matlack 	}
10207b7e1ab6SDavid Matlack 
10217b7e1ab6SDavid Matlack 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
10227b7e1ab6SDavid Matlack 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
10237b7e1ab6SDavid Matlack 	if (account_nx)
10247b7e1ab6SDavid Matlack 		account_huge_nx_page(kvm, sp);
10257b7e1ab6SDavid Matlack 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
10267b7e1ab6SDavid Matlack 
10277b7e1ab6SDavid Matlack 	return 0;
10287b7e1ab6SDavid Matlack }
10297b7e1ab6SDavid Matlack 
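/*
 * Illustrative sketch, assuming a vCPU context and an iterator positioned on
 * a non-present SPTE under the MMU read lock: the typical install-or-discard
 * pattern around tdp_mmu_link_sp(), mirroring kvm_tdp_mmu_map() below.
 *
 *	sp = tdp_mmu_alloc_sp(vcpu);
 *	tdp_mmu_init_child_sp(sp, &iter);
 *	if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, false, true))
 *		tdp_mmu_free_sp(sp);
 *
 * A non-zero return means the atomic compare-exchange lost to another thread,
 * so the freshly allocated page table is discarded and the fault is retried.
 */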
10307b7e1ab6SDavid Matlack /*
1031bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1032bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
1033bb18842eSBen Gardon  */
10342f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1035bb18842eSBen Gardon {
1036bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1037bb18842eSBen Gardon 	struct tdp_iter iter;
103889c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1039bb18842eSBen Gardon 	int ret;
1040bb18842eSBen Gardon 
104173a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
1042bb18842eSBen Gardon 
1043f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
10447cca2d0bSBen Gardon 
10457cca2d0bSBen Gardon 	rcu_read_lock();
10467cca2d0bSBen Gardon 
10472f6305ddSPaolo Bonzini 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
104873a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
1049536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1050bb18842eSBen Gardon 
105173a3c659SPaolo Bonzini 		if (iter.level == fault->goal_level)
1052bb18842eSBen Gardon 			break;
1053bb18842eSBen Gardon 
1054bb18842eSBen Gardon 		/*
1055bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1056bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1057bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1058bb18842eSBen Gardon 		 */
1059bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1060bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
10613e72c791SDavid Matlack 			if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
10629a77daacSBen Gardon 				break;
1063bb18842eSBen Gardon 
1064bb18842eSBen Gardon 			/*
1065bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1066bb18842eSBen Gardon 			 * because the new value informs the !present
1067bb18842eSBen Gardon 			 * path below.
1068bb18842eSBen Gardon 			 */
10697cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1070bb18842eSBen Gardon 		}
1071bb18842eSBen Gardon 
1072bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
10737b7e1ab6SDavid Matlack 			bool account_nx = fault->huge_page_disallowed &&
10747b7e1ab6SDavid Matlack 					  fault->req_level >= iter.level;
10757b7e1ab6SDavid Matlack 
1076ff76d506SKai Huang 			/*
1077c4342633SIngo Molnar 			 * If the SPTE has been frozen by another thread, just
1078ff76d506SKai Huang 			 * give up and retry, avoiding unnecessary page table
1079ff76d506SKai Huang 			 * allocation and freeing.
1080ff76d506SKai Huang 			 */
1081ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1082ff76d506SKai Huang 				break;
1083ff76d506SKai Huang 
1084a82070b6SDavid Matlack 			sp = tdp_mmu_alloc_sp(vcpu);
1085a82070b6SDavid Matlack 			tdp_mmu_init_child_sp(sp, &iter);
1086a82070b6SDavid Matlack 
1087cb00a70bSDavid Matlack 			if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
10889a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
10899a77daacSBen Gardon 				break;
10909a77daacSBen Gardon 			}
1091bb18842eSBen Gardon 		}
1092bb18842eSBen Gardon 	}
1093bb18842eSBen Gardon 
109473a3c659SPaolo Bonzini 	if (iter.level != fault->goal_level) {
10957cca2d0bSBen Gardon 		rcu_read_unlock();
1096bb18842eSBen Gardon 		return RET_PF_RETRY;
10977cca2d0bSBen Gardon 	}
1098bb18842eSBen Gardon 
1099cdc47767SPaolo Bonzini 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
11007cca2d0bSBen Gardon 	rcu_read_unlock();
1101bb18842eSBen Gardon 
1102bb18842eSBen Gardon 	return ret;
1103bb18842eSBen Gardon }
1104063afacdSBen Gardon 
11053039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
11063039bcc7SSean Christopherson 				 bool flush)
1107063afacdSBen Gardon {
110883b83a02SSean Christopherson 	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
110983b83a02SSean Christopherson 					   range->end, range->may_block, flush);
11103039bcc7SSean Christopherson }
11113039bcc7SSean Christopherson 
11123039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
11133039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
11143039bcc7SSean Christopherson 
11153039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
11163039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1117c1b91493SSean Christopherson 						   tdp_handler_t handler)
1118063afacdSBen Gardon {
1119063afacdSBen Gardon 	struct kvm_mmu_page *root;
11203039bcc7SSean Christopherson 	struct tdp_iter iter;
11213039bcc7SSean Christopherson 	bool ret = false;
1122063afacdSBen Gardon 
11233039bcc7SSean Christopherson 	rcu_read_lock();
1124063afacdSBen Gardon 
1125063afacdSBen Gardon 	/*
1126e1eed584SSean Christopherson 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1127e1eed584SSean Christopherson 	 * into this helper allow blocking, so it'd be dead, wasteful code.
1128063afacdSBen Gardon 	 */
11293039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
11303039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
11313039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
11323039bcc7SSean Christopherson 	}
1133063afacdSBen Gardon 
11343039bcc7SSean Christopherson 	rcu_read_unlock();
1135063afacdSBen Gardon 
1136063afacdSBen Gardon 	return ret;
1137063afacdSBen Gardon }
1138063afacdSBen Gardon 
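/*
 * Illustrative sketch of the handler shape expected by
 * kvm_tdp_mmu_handle_gfn(); the handler name here is hypothetical, the real
 * handlers (age_gfn_range, test_age_gfn, set_spte_gfn) follow below.
 *
 *	static bool spte_is_present(struct kvm *kvm, struct tdp_iter *iter,
 *				    struct kvm_gfn_range *range)
 *	{
 *		return is_shadow_present_pte(iter->old_spte);
 *	}
 *
 *	bool any_present = kvm_tdp_mmu_handle_gfn(kvm, range, spte_is_present);
 */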
1139f8e14497SBen Gardon /*
1140f8e14497SBen Gardon  * Mark the SPTE range of GFNs [start, end) unaccessed and return true
1141f8e14497SBen Gardon  * if any of the GFNs in the range have been accessed.
1142f8e14497SBen Gardon  */
11433039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
11443039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1145f8e14497SBen Gardon {
1146f8e14497SBen Gardon 	u64 new_spte = 0;
1147f8e14497SBen Gardon 
11483039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
11493039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
11503039bcc7SSean Christopherson 		return false;
11517cca2d0bSBen Gardon 
11523039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1153f8e14497SBen Gardon 
1154f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
11558f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1156f8e14497SBen Gardon 	} else {
1157f8e14497SBen Gardon 		/*
1158f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1159f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1160f8e14497SBen Gardon 		 */
1161f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1162f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1163f8e14497SBen Gardon 
1164f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1165f8e14497SBen Gardon 	}
1166f8e14497SBen Gardon 
11673039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
116833dd3574SBen Gardon 
11693039bcc7SSean Christopherson 	return true;
1170f8e14497SBen Gardon }
1171f8e14497SBen Gardon 
11723039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1173f8e14497SBen Gardon {
11743039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1175f8e14497SBen Gardon }
1176f8e14497SBen Gardon 
11773039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
11783039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1179f8e14497SBen Gardon {
11803039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1181f8e14497SBen Gardon }
1182f8e14497SBen Gardon 
11833039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1184f8e14497SBen Gardon {
11853039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
11863039bcc7SSean Christopherson }
11873039bcc7SSean Christopherson 
11883039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
11893039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
11903039bcc7SSean Christopherson {
11913039bcc7SSean Christopherson 	u64 new_spte;
11923039bcc7SSean Christopherson 
11933039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
11943039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11953039bcc7SSean Christopherson 
11963039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
11973039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
11983039bcc7SSean Christopherson 		return false;
11993039bcc7SSean Christopherson 
12003039bcc7SSean Christopherson 	/*
12013039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
12023039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
12033039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
12043039bcc7SSean Christopherson 	 * See __handle_changed_spte().
12053039bcc7SSean Christopherson 	 */
12063039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
12073039bcc7SSean Christopherson 
12083039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
12093039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
12103039bcc7SSean Christopherson 								  pte_pfn(range->pte));
12113039bcc7SSean Christopherson 
12123039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
12133039bcc7SSean Christopherson 	}
12143039bcc7SSean Christopherson 
12153039bcc7SSean Christopherson 	return true;
1216f8e14497SBen Gardon }
12171d8dd6b3SBen Gardon 
12181d8dd6b3SBen Gardon /*
12191d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
12201d8dd6b3SBen Gardon  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
12211d8dd6b3SBen Gardon  * notifier.
12221d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
12231d8dd6b3SBen Gardon  */
12243039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
12251d8dd6b3SBen Gardon {
12263039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
12271d8dd6b3SBen Gardon 
12283039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
12293039bcc7SSean Christopherson 	if (flush)
12303039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
12317cca2d0bSBen Gardon 
12323039bcc7SSean Christopherson 	return false;
12331d8dd6b3SBen Gardon }
12341d8dd6b3SBen Gardon 
1235a6a0b05dSBen Gardon /*
1236bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1237bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1238bedd9195SDavid Matlack  * be flushed.
1239a6a0b05dSBen Gardon  */
1240a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1241a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1242a6a0b05dSBen Gardon {
1243a6a0b05dSBen Gardon 	struct tdp_iter iter;
1244a6a0b05dSBen Gardon 	u64 new_spte;
1245a6a0b05dSBen Gardon 	bool spte_set = false;
1246a6a0b05dSBen Gardon 
12477cca2d0bSBen Gardon 	rcu_read_lock();
12487cca2d0bSBen Gardon 
1249a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1250a6a0b05dSBen Gardon 
125177aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
125224ae4cfaSBen Gardon retry:
125324ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12541af4a960SBen Gardon 			continue;
12551af4a960SBen Gardon 
1256a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
12570f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
12580f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1259a6a0b05dSBen Gardon 			continue;
1260a6a0b05dSBen Gardon 
1261a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1262a6a0b05dSBen Gardon 
12633e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
126424ae4cfaSBen Gardon 			goto retry;
12653255530aSDavid Matlack 
1266a6a0b05dSBen Gardon 		spte_set = true;
1267a6a0b05dSBen Gardon 	}
12687cca2d0bSBen Gardon 
12697cca2d0bSBen Gardon 	rcu_read_unlock();
1270a6a0b05dSBen Gardon 	return spte_set;
1271a6a0b05dSBen Gardon }
1272a6a0b05dSBen Gardon 
1273a6a0b05dSBen Gardon /*
1274a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1275a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1276a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1277a6a0b05dSBen Gardon  */
1278269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1279269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1280a6a0b05dSBen Gardon {
1281a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1282a6a0b05dSBen Gardon 	bool spte_set = false;
1283a6a0b05dSBen Gardon 
128424ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1285a6a0b05dSBen Gardon 
1286d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1287a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1288a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1289a6a0b05dSBen Gardon 
1290a6a0b05dSBen Gardon 	return spte_set;
1291a6a0b05dSBen Gardon }
1292a6a0b05dSBen Gardon 
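/*
 * Illustrative sketch, assuming a valid memslot: write-protect every 4K
 * mapping in the slot under the MMU read lock, then do a (conservative)
 * full remote TLB flush if any SPTE changed.
 *
 *	read_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	read_unlock(&kvm->mmu_lock);
 */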
1293a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1294a3fe5dbdSDavid Matlack {
1295a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1296a3fe5dbdSDavid Matlack 
1297a3fe5dbdSDavid Matlack 	gfp |= __GFP_ZERO;
1298a3fe5dbdSDavid Matlack 
1299a3fe5dbdSDavid Matlack 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1300a3fe5dbdSDavid Matlack 	if (!sp)
1301a3fe5dbdSDavid Matlack 		return NULL;
1302a3fe5dbdSDavid Matlack 
1303a3fe5dbdSDavid Matlack 	sp->spt = (void *)__get_free_page(gfp);
1304a3fe5dbdSDavid Matlack 	if (!sp->spt) {
1305a3fe5dbdSDavid Matlack 		kmem_cache_free(mmu_page_header_cache, sp);
1306a3fe5dbdSDavid Matlack 		return NULL;
1307a3fe5dbdSDavid Matlack 	}
1308a3fe5dbdSDavid Matlack 
1309a3fe5dbdSDavid Matlack 	return sp;
1310a3fe5dbdSDavid Matlack }
1311a3fe5dbdSDavid Matlack 
1312a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1313cb00a70bSDavid Matlack 						       struct tdp_iter *iter,
1314cb00a70bSDavid Matlack 						       bool shared)
1315a3fe5dbdSDavid Matlack {
1316a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1317a3fe5dbdSDavid Matlack 
1318a3fe5dbdSDavid Matlack 	/*
1319a3fe5dbdSDavid Matlack 	 * Since we are allocating while under the MMU lock we have to be
1320a3fe5dbdSDavid Matlack 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1321a3fe5dbdSDavid Matlack 	 * reclaim and to avoid making any filesystem callbacks (which can end
1322a3fe5dbdSDavid Matlack 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1323a3fe5dbdSDavid Matlack 	 *
1324a3fe5dbdSDavid Matlack 	 * If this allocation fails we drop the lock and retry with reclaim
1325a3fe5dbdSDavid Matlack 	 * allowed.
1326a3fe5dbdSDavid Matlack 	 */
1327a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1328a3fe5dbdSDavid Matlack 	if (sp)
1329a3fe5dbdSDavid Matlack 		return sp;
1330a3fe5dbdSDavid Matlack 
1331a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1332cb00a70bSDavid Matlack 
1333cb00a70bSDavid Matlack 	if (shared)
1334a3fe5dbdSDavid Matlack 		read_unlock(&kvm->mmu_lock);
1335cb00a70bSDavid Matlack 	else
1336cb00a70bSDavid Matlack 		write_unlock(&kvm->mmu_lock);
1337a3fe5dbdSDavid Matlack 
1338a3fe5dbdSDavid Matlack 	iter->yielded = true;
1339a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1340a3fe5dbdSDavid Matlack 
1341cb00a70bSDavid Matlack 	if (shared)
1342a3fe5dbdSDavid Matlack 		read_lock(&kvm->mmu_lock);
1343cb00a70bSDavid Matlack 	else
1344cb00a70bSDavid Matlack 		write_lock(&kvm->mmu_lock);
1345cb00a70bSDavid Matlack 
1346a3fe5dbdSDavid Matlack 	rcu_read_lock();
1347a3fe5dbdSDavid Matlack 
1348a3fe5dbdSDavid Matlack 	return sp;
1349a3fe5dbdSDavid Matlack }
1350a3fe5dbdSDavid Matlack 
1351cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1352cb00a70bSDavid Matlack 				   struct kvm_mmu_page *sp, bool shared)
1353a3fe5dbdSDavid Matlack {
1354a3fe5dbdSDavid Matlack 	const u64 huge_spte = iter->old_spte;
1355a3fe5dbdSDavid Matlack 	const int level = iter->level;
1356a3fe5dbdSDavid Matlack 	int ret, i;
1357a3fe5dbdSDavid Matlack 
1358a3fe5dbdSDavid Matlack 	tdp_mmu_init_child_sp(sp, iter);
1359a3fe5dbdSDavid Matlack 
1360a3fe5dbdSDavid Matlack 	/*
1361a3fe5dbdSDavid Matlack 	 * No need for atomics when writing to sp->spt since the page table has
1362a3fe5dbdSDavid Matlack 	 * not been linked in yet and thus is not reachable from any other CPU.
1363a3fe5dbdSDavid Matlack 	 */
1364a3fe5dbdSDavid Matlack 	for (i = 0; i < PT64_ENT_PER_PAGE; i++)
1365a3fe5dbdSDavid Matlack 		sp->spt[i] = make_huge_page_split_spte(huge_spte, level, i);
1366a3fe5dbdSDavid Matlack 
1367a3fe5dbdSDavid Matlack 	/*
1368a3fe5dbdSDavid Matlack 	 * Replace the huge spte with a pointer to the populated lower level
1369a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush vCPUs
1370a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush, vCPUs
1371a3fe5dbdSDavid Matlack 	 * depending on what's currently in their TLB. This is fine from a
1372a3fe5dbdSDavid Matlack 	 * correctness standpoint since the translation will be the same either
1373a3fe5dbdSDavid Matlack 	 * way.
1374a3fe5dbdSDavid Matlack 	 */
1375cb00a70bSDavid Matlack 	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
1376a3fe5dbdSDavid Matlack 	if (ret)
1377e0b728b1SDavid Matlack 		goto out;
1378a3fe5dbdSDavid Matlack 
1379a3fe5dbdSDavid Matlack 	/*
1380a3fe5dbdSDavid Matlack 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1381a3fe5dbdSDavid Matlack 	 * are overwriting from the page stats. But we have to manually update
1382a3fe5dbdSDavid Matlack 	 * the page stats with the new present child pages.
1383a3fe5dbdSDavid Matlack 	 */
1384a3fe5dbdSDavid Matlack 	kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
1385a3fe5dbdSDavid Matlack 
1386e0b728b1SDavid Matlack out:
1387e0b728b1SDavid Matlack 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1388e0b728b1SDavid Matlack 	return ret;
1389a3fe5dbdSDavid Matlack }
1390a3fe5dbdSDavid Matlack 
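/*
 * Worked example, assuming 4K base pages: splitting a 1GB SPTE (level 3)
 * fills the child page table with PT64_ENT_PER_PAGE (512) 2MB SPTEs, where
 * child i is make_huge_page_split_spte(huge_spte, 3, i) and maps GFNs
 * [huge_gfn + i * 512, huge_gfn + (i + 1) * 512). The page stats then gain
 * 512 level-2 pages; the level-3 page is subtracted when the huge SPTE is
 * replaced by the link above.
 */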
1391a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1392a3fe5dbdSDavid Matlack 					 struct kvm_mmu_page *root,
1393a3fe5dbdSDavid Matlack 					 gfn_t start, gfn_t end,
1394cb00a70bSDavid Matlack 					 int target_level, bool shared)
1395a3fe5dbdSDavid Matlack {
1396a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp = NULL;
1397a3fe5dbdSDavid Matlack 	struct tdp_iter iter;
1398a3fe5dbdSDavid Matlack 	int ret = 0;
1399a3fe5dbdSDavid Matlack 
1400a3fe5dbdSDavid Matlack 	rcu_read_lock();
1401a3fe5dbdSDavid Matlack 
1402a3fe5dbdSDavid Matlack 	/*
1403a3fe5dbdSDavid Matlack 	 * Traverse the page table splitting all huge pages above the target
1404a3fe5dbdSDavid Matlack 	 * level into one lower level. For example, if we encounter a 1GB page
1405a3fe5dbdSDavid Matlack 	 * we split it into 512 2MB pages.
1406a3fe5dbdSDavid Matlack 	 *
1407a3fe5dbdSDavid Matlack 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1408a3fe5dbdSDavid Matlack 	 * to visit an SPTE before ever visiting its children, which means we
1409a3fe5dbdSDavid Matlack 	 * will correctly recursively split huge pages that are more than one
1410a3fe5dbdSDavid Matlack 	 * level above the target level (e.g. splitting a 1GB page into 512 2MB
1411a3fe5dbdSDavid Matlack 	 * pages, and then splitting each of those into 512 4KB pages).
1412a3fe5dbdSDavid Matlack 	 */
1413a3fe5dbdSDavid Matlack 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1414a3fe5dbdSDavid Matlack retry:
1415cb00a70bSDavid Matlack 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1416a3fe5dbdSDavid Matlack 			continue;
1417a3fe5dbdSDavid Matlack 
1418a3fe5dbdSDavid Matlack 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1419a3fe5dbdSDavid Matlack 			continue;
1420a3fe5dbdSDavid Matlack 
1421a3fe5dbdSDavid Matlack 		if (!sp) {
1422cb00a70bSDavid Matlack 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1423a3fe5dbdSDavid Matlack 			if (!sp) {
1424a3fe5dbdSDavid Matlack 				ret = -ENOMEM;
1425e0b728b1SDavid Matlack 				trace_kvm_mmu_split_huge_page(iter.gfn,
1426e0b728b1SDavid Matlack 							      iter.old_spte,
1427e0b728b1SDavid Matlack 							      iter.level, ret);
1428a3fe5dbdSDavid Matlack 				break;
1429a3fe5dbdSDavid Matlack 			}
1430a3fe5dbdSDavid Matlack 
1431a3fe5dbdSDavid Matlack 			if (iter.yielded)
1432a3fe5dbdSDavid Matlack 				continue;
1433a3fe5dbdSDavid Matlack 		}
1434a3fe5dbdSDavid Matlack 
1435cb00a70bSDavid Matlack 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1436a3fe5dbdSDavid Matlack 			goto retry;
1437a3fe5dbdSDavid Matlack 
1438a3fe5dbdSDavid Matlack 		sp = NULL;
1439a3fe5dbdSDavid Matlack 	}
1440a3fe5dbdSDavid Matlack 
1441a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1442a3fe5dbdSDavid Matlack 
1443a3fe5dbdSDavid Matlack 	/*
1444a3fe5dbdSDavid Matlack 	 * It's possible to exit the loop having never used the last sp if, for
1445a3fe5dbdSDavid Matlack 	 * example, a vCPU doing HugePage NX splitting wins the race and
1446a3fe5dbdSDavid Matlack 	 * installs its own sp in place of the last sp we tried to split.
1447a3fe5dbdSDavid Matlack 	 */
1448a3fe5dbdSDavid Matlack 	if (sp)
1449a3fe5dbdSDavid Matlack 		tdp_mmu_free_sp(sp);
1450a3fe5dbdSDavid Matlack 
1451a3fe5dbdSDavid Matlack 	return ret;
1452a3fe5dbdSDavid Matlack }
1453a3fe5dbdSDavid Matlack 
1454cb00a70bSDavid Matlack 
1455a3fe5dbdSDavid Matlack /*
1456a3fe5dbdSDavid Matlack  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1457a3fe5dbdSDavid Matlack  */
1458a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1459a3fe5dbdSDavid Matlack 				      const struct kvm_memory_slot *slot,
1460a3fe5dbdSDavid Matlack 				      gfn_t start, gfn_t end,
1461cb00a70bSDavid Matlack 				      int target_level, bool shared)
1462a3fe5dbdSDavid Matlack {
1463a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *root;
1464a3fe5dbdSDavid Matlack 	int r = 0;
1465a3fe5dbdSDavid Matlack 
1466cb00a70bSDavid Matlack 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1467a3fe5dbdSDavid Matlack 
14687c554d8eSPaolo Bonzini 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1469cb00a70bSDavid Matlack 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1470a3fe5dbdSDavid Matlack 		if (r) {
1471cb00a70bSDavid Matlack 			kvm_tdp_mmu_put_root(kvm, root, shared);
1472a3fe5dbdSDavid Matlack 			break;
1473a3fe5dbdSDavid Matlack 		}
1474a3fe5dbdSDavid Matlack 	}
1475a3fe5dbdSDavid Matlack }
1476a3fe5dbdSDavid Matlack 
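/*
 * Illustrative sketch, assuming dirty logging is about to be enabled on the
 * slot and the MMU lock is held for read (shared == true): eagerly split
 * every huge page backing the slot down to 4K so that later write faults do
 * not have to do the split themselves.
 *
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_try_split_huge_pages(kvm, slot, slot->base_gfn,
 *					 slot->base_gfn + slot->npages,
 *					 PG_LEVEL_4K, true);
 *	read_unlock(&kvm->mmu_lock);
 */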
1477a6a0b05dSBen Gardon /*
1478a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1479a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1480a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1481a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1482a6a0b05dSBen Gardon  * be flushed.
1483a6a0b05dSBen Gardon  */
1484a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1485a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1486a6a0b05dSBen Gardon {
1487a6a0b05dSBen Gardon 	struct tdp_iter iter;
1488a6a0b05dSBen Gardon 	u64 new_spte;
1489a6a0b05dSBen Gardon 	bool spte_set = false;
1490a6a0b05dSBen Gardon 
14917cca2d0bSBen Gardon 	rcu_read_lock();
14927cca2d0bSBen Gardon 
1493a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
149424ae4cfaSBen Gardon retry:
149524ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
14961af4a960SBen Gardon 			continue;
14971af4a960SBen Gardon 
14983354ef5aSSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte))
14993354ef5aSSean Christopherson 			continue;
15003354ef5aSSean Christopherson 
1501a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1502a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1503a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1504a6a0b05dSBen Gardon 			else
1505a6a0b05dSBen Gardon 				continue;
1506a6a0b05dSBen Gardon 		} else {
1507a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1508a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1509a6a0b05dSBen Gardon 			else
1510a6a0b05dSBen Gardon 				continue;
1511a6a0b05dSBen Gardon 		}
1512a6a0b05dSBen Gardon 
15133e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
151424ae4cfaSBen Gardon 			goto retry;
15153255530aSDavid Matlack 
1516a6a0b05dSBen Gardon 		spte_set = true;
1517a6a0b05dSBen Gardon 	}
15187cca2d0bSBen Gardon 
15197cca2d0bSBen Gardon 	rcu_read_unlock();
1520a6a0b05dSBen Gardon 	return spte_set;
1521a6a0b05dSBen Gardon }
1522a6a0b05dSBen Gardon 
1523a6a0b05dSBen Gardon /*
1524a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1525a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1526a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1527a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1528a6a0b05dSBen Gardon  * be flushed.
1529a6a0b05dSBen Gardon  */
1530269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1531269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1532a6a0b05dSBen Gardon {
1533a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1534a6a0b05dSBen Gardon 	bool spte_set = false;
1535a6a0b05dSBen Gardon 
153624ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1537a6a0b05dSBen Gardon 
1538d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1539a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1540a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1541a6a0b05dSBen Gardon 
1542a6a0b05dSBen Gardon 	return spte_set;
1543a6a0b05dSBen Gardon }
1544a6a0b05dSBen Gardon 
1545a6a0b05dSBen Gardon /*
1546a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1547a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1548a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1549a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1550a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1551a6a0b05dSBen Gardon  */
1552a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1553a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1554a6a0b05dSBen Gardon {
1555a6a0b05dSBen Gardon 	struct tdp_iter iter;
1556a6a0b05dSBen Gardon 	u64 new_spte;
1557a6a0b05dSBen Gardon 
15587cca2d0bSBen Gardon 	rcu_read_lock();
15597cca2d0bSBen Gardon 
1560a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1561a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1562a6a0b05dSBen Gardon 		if (!mask)
1563a6a0b05dSBen Gardon 			break;
1564a6a0b05dSBen Gardon 
1565a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1566a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1567a6a0b05dSBen Gardon 			continue;
1568a6a0b05dSBen Gardon 
1569f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1570f1b3b06aSBen Gardon 
1571a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1572a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1573a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1574a6a0b05dSBen Gardon 			else
1575a6a0b05dSBen Gardon 				continue;
1576a6a0b05dSBen Gardon 		} else {
1577a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1578a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1579a6a0b05dSBen Gardon 			else
1580a6a0b05dSBen Gardon 				continue;
1581a6a0b05dSBen Gardon 		}
1582a6a0b05dSBen Gardon 
1583a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1584a6a0b05dSBen Gardon 	}
15857cca2d0bSBen Gardon 
15867cca2d0bSBen Gardon 	rcu_read_unlock();
1587a6a0b05dSBen Gardon }
1588a6a0b05dSBen Gardon 
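/*
 * Worked example: with gfn == 0x1000 and mask == 0x5 (bits 0 and 2 set),
 * the walk above visits only the 4K SPTEs for GFNs 0x1000 and 0x1002,
 * clearing either the writable bit (if wrprot, or if write protection is
 * needed for dirty tracking) or the dirty bit. Each handled bit is cleared
 * from mask, so the loop terminates as soon as mask reaches zero.
 */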
1589a6a0b05dSBen Gardon /*
1590a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1591a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1592a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1593a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1594a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1595a6a0b05dSBen Gardon  */
1596a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1597a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1598a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1599a6a0b05dSBen Gardon 				       bool wrprot)
1600a6a0b05dSBen Gardon {
1601a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1602a6a0b05dSBen Gardon 
1603531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1604a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1605a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1606a6a0b05dSBen Gardon }
1607a6a0b05dSBen Gardon 
1608a6a0b05dSBen Gardon /*
160987aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
161087aa9ec9SBen Gardon  * GFNs within the slot.
161114881998SBen Gardon  */
16124b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
161314881998SBen Gardon 				       struct kvm_mmu_page *root,
16144b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
161514881998SBen Gardon {
16169eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
16179eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
161814881998SBen Gardon 	struct tdp_iter iter;
161914881998SBen Gardon 	kvm_pfn_t pfn;
162014881998SBen Gardon 
16217cca2d0bSBen Gardon 	rcu_read_lock();
16227cca2d0bSBen Gardon 
162314881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
16242db6f772SBen Gardon retry:
16254b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
16261af4a960SBen Gardon 			continue;
16271af4a960SBen Gardon 
162814881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
162987aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
163014881998SBen Gardon 			continue;
163114881998SBen Gardon 
163214881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
163314881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
16349eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
16359eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
163614881998SBen Gardon 			continue;
163714881998SBen Gardon 
16384b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
16393e72c791SDavid Matlack 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
16402db6f772SBen Gardon 			goto retry;
16412db6f772SBen Gardon 	}
164214881998SBen Gardon 
16437cca2d0bSBen Gardon 	rcu_read_unlock();
164414881998SBen Gardon }
164514881998SBen Gardon 
164614881998SBen Gardon /*
164714881998SBen Gardon  * Zap leaf SPTEs which could be replaced by large mappings, for GFNs within
164814881998SBen Gardon  * the slot, so that huge mappings can be recreated on the next access.
164914881998SBen Gardon  */
16504b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
16514b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
165214881998SBen Gardon {
165314881998SBen Gardon 	struct kvm_mmu_page *root;
165414881998SBen Gardon 
16552db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
165614881998SBen Gardon 
1657d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
16584b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
165914881998SBen Gardon }
166046044f72SBen Gardon 
166146044f72SBen Gardon /*
166246044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
16635fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
166446044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
166546044f72SBen Gardon  */
166646044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
16673ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
166846044f72SBen Gardon {
166946044f72SBen Gardon 	struct tdp_iter iter;
167046044f72SBen Gardon 	u64 new_spte;
167146044f72SBen Gardon 	bool spte_set = false;
167246044f72SBen Gardon 
16733ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
16743ad93562SKeqian Zhu 
16757cca2d0bSBen Gardon 	rcu_read_lock();
16767cca2d0bSBen Gardon 
167777aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
16783ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
16793ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
16803ad93562SKeqian Zhu 			continue;
16813ad93562SKeqian Zhu 
168246044f72SBen Gardon 		new_spte = iter.old_spte &
16835fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
168446044f72SBen Gardon 
16857c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
16867c8a4742SDavid Matlack 			break;
16877c8a4742SDavid Matlack 
168846044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
168946044f72SBen Gardon 		spte_set = true;
169046044f72SBen Gardon 	}
169146044f72SBen Gardon 
16927cca2d0bSBen Gardon 	rcu_read_unlock();
16937cca2d0bSBen Gardon 
169446044f72SBen Gardon 	return spte_set;
169546044f72SBen Gardon }
169646044f72SBen Gardon 
169746044f72SBen Gardon /*
169846044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
16995fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
170046044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
170146044f72SBen Gardon  */
170246044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
17033ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
17043ad93562SKeqian Zhu 				   int min_level)
170546044f72SBen Gardon {
170646044f72SBen Gardon 	struct kvm_mmu_page *root;
170746044f72SBen Gardon 	bool spte_set = false;
170846044f72SBen Gardon 
1709531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1710a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
17113ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1712a3f15bdaSSean Christopherson 
171346044f72SBen Gardon 	return spte_set;
171446044f72SBen Gardon }
171546044f72SBen Gardon 
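/*
 * Illustrative sketch, with the MMU lock held for write: write-protect a
 * single GFN down to 4K granularity and flush if any SPTE changed.
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */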
171695fb5b02SBen Gardon /*
171795fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
171895fb5b02SBen Gardon  * That SPTE may be non-present.
1719c5c8c7c5SDavid Matlack  *
1720c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
172195fb5b02SBen Gardon  */
172239b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
172339b4d43eSSean Christopherson 			 int *root_level)
172495fb5b02SBen Gardon {
172595fb5b02SBen Gardon 	struct tdp_iter iter;
172695fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
172795fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
17282aa07893SSean Christopherson 	int leaf = -1;
172995fb5b02SBen Gardon 
173039b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
173195fb5b02SBen Gardon 
173295fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
173395fb5b02SBen Gardon 		leaf = iter.level;
1734dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
173595fb5b02SBen Gardon 	}
173695fb5b02SBen Gardon 
173795fb5b02SBen Gardon 	return leaf;
173895fb5b02SBen Gardon }
17396e8eb206SDavid Matlack 
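/*
 * Illustrative sketch of the intended call pattern; the array size
 * (PT64_ROOT_MAX_LEVEL + 1) and the zero-argument begin/end helpers are
 * assumptions based on how the lockless walk is used elsewhere.
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *	if (leaf >= 0)
 *		spte = sptes[leaf];
 */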
17406e8eb206SDavid Matlack /*
17416e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
17426e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
17436e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
17446e8eb206SDavid Matlack  *
17456e8eb206SDavid Matlack  * Contract:
17466e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
17476e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
17486e8eb206SDavid Matlack  *
17496e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
17506e8eb206SDavid Matlack  */
17516e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
17526e8eb206SDavid Matlack 					u64 *spte)
17536e8eb206SDavid Matlack {
17546e8eb206SDavid Matlack 	struct tdp_iter iter;
17556e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
17566e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
17576e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
17586e8eb206SDavid Matlack 
17596e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
17606e8eb206SDavid Matlack 		*spte = iter.old_spte;
17616e8eb206SDavid Matlack 		sptep = iter.sptep;
17626e8eb206SDavid Matlack 	}
17636e8eb206SDavid Matlack 
17646e8eb206SDavid Matlack 	/*
17656e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
17666e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
17676e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
17686e8eb206SDavid Matlack 	 * annotation.
17696e8eb206SDavid Matlack 	 *
17706e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
17716e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
17726e8eb206SDavid Matlack 	 * outside of mmu_lock.
17736e8eb206SDavid Matlack 	 */
17746e8eb206SDavid Matlack 	return rcu_dereference(sptep);
17756e8eb206SDavid Matlack }
1776