/* arch/x86/kvm/mmu/tdp_mmu.c (revision bd296779) */
// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

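/* Free the page-table page and its struct kvm_mmu_page header. */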
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared);

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	/*
	 * A TLB flush is not necessary as KVM performs a local TLB flush when
	 * allocating a new root (see kvm_mmu_load()), and when migrating a
	 * vCPU to a different pCPU.  Note, the local TLB flush on reuse also
	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
	 * intermediate paging structures, that may be zapped, as such entries
	 * are associated with the ASID on both VMX and SVM.
	 */
	tdp_mmu_zap_root(kvm, root, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL).  A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
		    kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)

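/*
 * Example usage (see kvm_tdp_mmu_zap_leafs() and kvm_tdp_mmu_zap_all() below
 * for the in-tree users): walk every root for an address space while allowing
 * the walk to yield the MMU lock:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
 */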
/*
 * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
		    kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

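/*
 * Allocate a new TDP MMU shadow page, drawing both the struct kvm_mmu_page
 * and the backing page-table page from the vCPU's pre-filled memory caches.
 */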
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

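/*
 * Initialize a shadow page: link its page-table page back to the sp via the
 * struct page private field and record its role, gfn and parent SPTE pointer.
 */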
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

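/*
 * Initialize @child_sp as a child of the page table that @iter currently
 * points at, i.e. inherit the parent's role with the level decremented.
 */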
static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

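/*
 * Get a root page table for the vCPU's current MMU role, reusing an existing
 * root with a matching role when possible, else allocating a new one.
 * Returns the physical address to be loaded as the vCPU's root HPA.
 */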
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * Check for an existing root before allocating a new one.  Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

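/*
 * Propagate the accessed state to the backing page when a previously
 * accessed leaf SPTE is zapped, loses its accessed bit, or changes PFN.
 */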
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

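/*
 * Mark the gfn dirty in its memslot's dirty bitmap when a 4K SPTE becomes
 * writable, or is remapped writable to a different PFN.
 */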
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep = rcu_dereference(pt) + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_child_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only time a SPTE should be changed from a non-present to a
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve an MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.  Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter,
					  u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);
	u64 old_spte;

	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return -EBUSY;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
	if (old_spte != iter->old_spte) {
		/*
		 * The page table entry was modified by a different logical
		 * CPU. Refresh iter->old_spte with the current value so the
		 * caller operates on fresh data, e.g. if it retries
		 * tdp_mmu_set_spte_atomic().
		 */
		iter->old_spte = old_spte;
		return -EBUSY;
	}

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return 0;
}

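/*
 * Zap a SPTE while holding mmu_lock for read: freeze the SPTE with the
 * special REMOVED_SPTE value, flush the TLBs for its range, then clear it.
 */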
static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter)
{
	int ret;

	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
	if (ret)
		return ret;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	kvm_tdp_mmu_write_spte(iter->sptep, 0);

	return 0;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:	      KVM instance
 * @as_id:	      Address space ID, i.e. regular vs. SMM
 * @sptep:	      Pointer to the SPTE
 * @old_spte:	      The current value of the SPTE
 * @new_spte:	      The new value that will be set for the SPTE
 * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
 * @level:	      The level _containing_ the SPTE (its parent PT's level)
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			       u64 old_spte, u64 new_spte, gfn_t gfn, int level,
			       bool record_acc_track, bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));

	kvm_tdp_mmu_write_spte(sptep, new_spte);

	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

	if (record_acc_track)
		handle_changed_spte_acc_track(old_spte, new_spte, level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
					      new_spte, level);
}

static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				     u64 new_spte, bool record_acc_track,
				     bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	__tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
			   new_spte, iter->gfn, iter->level,
			   record_acc_track, record_dirty_log);
}

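/*
 * Convenience wrappers around _tdp_mmu_set_spte() that toggle whether
 * accessed-state and dirty-log bookkeeping is recorded for the change.
 */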
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

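/*
 * SPTE iterators: tdp_root_for_each_pte() walks every SPTE of a root for a
 * GFN range, tdp_root_for_each_leaf_pte() additionally skips non-present and
 * non-leaf entries, and tdp_mmu_for_each_pte() walks the structure rooted at
 * the given MMU's current root.
 */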
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		rcu_read_unlock();

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}

static inline gfn_t tdp_mmu_max_gfn_host(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that
	 * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	return 1ULL << (shadow_phys_bits - PAGE_SHIFT);
}

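/*
 * Zap every SPTE under @root.  Starting the walk at the root's own level lets
 * the zap of an upper-level SPTE recurse on all of its children.  Yielding is
 * only permitted while the root is still reachable (refcounted); see the
 * comment in the loop below.
 */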
static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{
	bool root_is_unreachable = !refcount_read(&root->tdp_mmu_root_count);
	struct tdp_iter iter;

	gfn_t end = tdp_mmu_max_gfn_host();
	gfn_t start = 0;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * No need to try to step down in the iterator when zapping an entire
	 * root, zapping an upper-level SPTE will recurse on its children.
	 */
	for_each_tdp_pte_min_level(iter, root, root->role.level, start, end) {
retry:
		/*
		 * Yielding isn't allowed when zapping an unreachable root as
		 * the root won't be processed by mmu_notifier callbacks.  When
		 * handling an unmap/release mmu_notifier command, KVM must
		 * drop all references to relevant pages prior to completing
		 * the callback.  Dropping mmu_lock can result in zapping SPTEs
		 * for an unreachable root after a relevant callback completes,
		 * which leads to use-after-free as zapping a SPTE triggers
		 * "writeback" of dirty/accessed bits to the SPTE's associated
		 * struct page.
		 */
		if (!root_is_unreachable &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
		} else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) {
			/*
			 * cmpxchg() shouldn't fail if the root is unreachable.
			 * Retry so as not to leak the page and its children.
			 */
			WARN_ONCE(root_is_unreachable,
				  "Contended TDP MMU SPTE in unreachable root.");
			goto retry;
		}

		/*
		 * WARN if the root is invalid and is unreachable, all SPTEs
		 * should've been zapped by kvm_tdp_mmu_zap_invalidated_roots(),
		 * and inserting new SPTEs under an invalid root is a KVM bug.
		 */
		WARN_ON_ONCE(root_is_unreachable && root->role.invalid);
	}

	rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * This helper intentionally doesn't allow zapping a root shadow page,
	 * which doesn't have a parent page table and thus no associated entry.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	rcu_read_lock();

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) {
		rcu_read_unlock();
		return false;
	}

	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
			   sp->gfn, sp->role.level + 1, true, true);

	rcu_read_unlock();

	return true;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end). Returns true if SPTEs
 * have been cleared and a TLB flush is needed before releasing the MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	end = min(end, tdp_mmu_max_gfn_host());

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
			   bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *root;
	int i;

	/*
	 * A TLB flush is unnecessary, KVM zaps everything if and only if the
	 * VM is being destroyed or the userspace VMM has exited.  In both
	 * cases, KVM_RUN is unreachable, i.e. no vCPUs will ever service the
	 * request.
	 */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
			tdp_mmu_zap_root(kvm, root, false);
	}
}

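/*
 * Return the root after @prev_root (or the first root if @prev_root is NULL)
 * that is both marked invalid and still holds a reference, i.e. a root that
 * was gifted a reference by kvm_tdp_mmu_invalidate_all_roots().
 */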
9094c6654bdSBen Gardon static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
9104c6654bdSBen Gardon 						  struct kvm_mmu_page *prev_root)
9114c6654bdSBen Gardon {
9124c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
9134c6654bdSBen Gardon 
9144c6654bdSBen Gardon 	if (prev_root)
9154c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
9164c6654bdSBen Gardon 						  &prev_root->link,
9174c6654bdSBen Gardon 						  typeof(*prev_root), link);
9184c6654bdSBen Gardon 	else
9194c6654bdSBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
9204c6654bdSBen Gardon 						   typeof(*next_root), link);
9214c6654bdSBen Gardon 
9224c6654bdSBen Gardon 	while (next_root && !(next_root->role.invalid &&
9234c6654bdSBen Gardon 			      refcount_read(&next_root->tdp_mmu_root_count)))
9244c6654bdSBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
9254c6654bdSBen Gardon 						  &next_root->link,
9264c6654bdSBen Gardon 						  typeof(*next_root), link);
9274c6654bdSBen Gardon 
9284c6654bdSBen Gardon 	return next_root;
9294c6654bdSBen Gardon }
9304c6654bdSBen Gardon 
9314c6654bdSBen Gardon /*
932f28e9c7fSSean Christopherson  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
933f28e9c7fSSean Christopherson  * zap" completes.  Since kvm_tdp_mmu_invalidate_all_roots() has acquired a
934f28e9c7fSSean Christopherson  * reference to each invalidated root, roots will not be freed until after this
935f28e9c7fSSean Christopherson  * function drops the gifted reference, e.g. so that vCPUs don't get stuck with
936f28e9c7fSSean Christopherson  * tearing down paging structures.
9374c6654bdSBen Gardon  */
9384c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
9394c6654bdSBen Gardon {
9404c6654bdSBen Gardon 	struct kvm_mmu_page *next_root;
9414c6654bdSBen Gardon 	struct kvm_mmu_page *root;
9424c6654bdSBen Gardon 
9434c6654bdSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
9444c6654bdSBen Gardon 
9454c6654bdSBen Gardon 	rcu_read_lock();
9464c6654bdSBen Gardon 
9474c6654bdSBen Gardon 	root = next_invalidated_root(kvm, NULL);
9484c6654bdSBen Gardon 
9494c6654bdSBen Gardon 	while (root) {
9504c6654bdSBen Gardon 		next_root = next_invalidated_root(kvm, root);
9514c6654bdSBen Gardon 
9524c6654bdSBen Gardon 		rcu_read_unlock();
9534c6654bdSBen Gardon 
9547ae5840eSSean Christopherson 		/*
9557ae5840eSSean Christopherson 		 * A TLB flush is unnecessary, invalidated roots are guaranteed
9567ae5840eSSean Christopherson 		 * to be unreachable by the guest (see kvm_tdp_mmu_put_root()
9577ae5840eSSean Christopherson 		 * for more details), and unlike the legacy MMU, no vCPU kick
9587ae5840eSSean Christopherson 		 * is needed to play nice with lockless shadow walks as the TDP
9597ae5840eSSean Christopherson 		 * MMU protects its paging structures via RCU.  Note, zapping
9607ae5840eSSean Christopherson 		 * will still flush on yield, but that's a minor performance
9617ae5840eSSean Christopherson 		 * blip and not a functional issue.
9627ae5840eSSean Christopherson 		 */
963e2b5b21dSSean Christopherson 		tdp_mmu_zap_root(kvm, root, true);
9644c6654bdSBen Gardon 
9654c6654bdSBen Gardon 		/*
9664c6654bdSBen Gardon 		 * Put the reference acquired in
9674c6654bdSBen Gardon 		 * kvm_tdp_mmu_invalidate_roots
9684c6654bdSBen Gardon 		 */
9694c6654bdSBen Gardon 		kvm_tdp_mmu_put_root(kvm, root, true);
9704c6654bdSBen Gardon 
9714c6654bdSBen Gardon 		root = next_root;
9724c6654bdSBen Gardon 
9734c6654bdSBen Gardon 		rcu_read_lock();
9744c6654bdSBen Gardon 	}
9754c6654bdSBen Gardon 
9764c6654bdSBen Gardon 	rcu_read_unlock();
9774c6654bdSBen Gardon }
9784c6654bdSBen Gardon 
979bb18842eSBen Gardon /*
980f28e9c7fSSean Christopherson  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
981f28e9c7fSSean Christopherson  * is about to be zapped, e.g. in response to a memslots update.  The caller is
982f28e9c7fSSean Christopherson  * responsible for invoking kvm_tdp_mmu_zap_invalidated_roots() to do the actual
983f28e9c7fSSean Christopherson  * zapping.
984b7cccd39SBen Gardon  *
985f28e9c7fSSean Christopherson  * Take a reference on all roots to prevent the root from being freed before it
986f28e9c7fSSean Christopherson  * is zapped by this thread.  Freeing a root is not a correctness issue, but if
987f28e9c7fSSean Christopherson  * a vCPU drops the last reference to a root prior to the root being zapped, it
988f28e9c7fSSean Christopherson  * will get stuck with tearing down the entire paging structure.
9894c6654bdSBen Gardon  *
990f28e9c7fSSean Christopherson  * Get a reference even if the root is already invalid,
991f28e9c7fSSean Christopherson  * kvm_tdp_mmu_zap_invalidated_roots() assumes it was gifted a reference to all
992f28e9c7fSSean Christopherson  * invalid roots, e.g. there's no epoch to identify roots that were invalidated
993f28e9c7fSSean Christopherson  * by a previous call.  Roots stay on the list until the last reference is
994f28e9c7fSSean Christopherson  * dropped, so even though all invalid roots are zapped, a root may not go away
995f28e9c7fSSean Christopherson  * for quite some time, e.g. if a vCPU blocks across multiple memslot updates.
996f28e9c7fSSean Christopherson  *
997f28e9c7fSSean Christopherson  * Because mmu_lock is held for write, it should be impossible to observe a
998f28e9c7fSSean Christopherson  * root with zero refcount, i.e. the list of roots cannot be stale.
9994c6654bdSBen Gardon  *
1000b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
1001b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
1002b7cccd39SBen Gardon  */
1003b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
1004b7cccd39SBen Gardon {
1005b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
1006b7cccd39SBen Gardon 
1007b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1008f28e9c7fSSean Christopherson 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1009f28e9c7fSSean Christopherson 		if (!WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
1010b7cccd39SBen Gardon 			root->role.invalid = true;
1011b7cccd39SBen Gardon 	}
1012f28e9c7fSSean Christopherson }
1013b7cccd39SBen Gardon 
1014bb18842eSBen Gardon /*
1015bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
1016bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
1017bb18842eSBen Gardon  */
1018cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1019cdc47767SPaolo Bonzini 					  struct kvm_page_fault *fault,
1020cdc47767SPaolo Bonzini 					  struct tdp_iter *iter)
1021bb18842eSBen Gardon {
1022c435d4b7SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1023bb18842eSBen Gardon 	u64 new_spte;
102457a3e96dSKai Huang 	int ret = RET_PF_FIXED;
1025ad67e480SPaolo Bonzini 	bool wrprot = false;
1026bb18842eSBen Gardon 
10277158bee4SPaolo Bonzini 	WARN_ON(sp->role.level != fault->goal_level);
1028e710c5f6SDavid Matlack 	if (unlikely(!fault->slot))
1029bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
10309a77daacSBen Gardon 	else
103153597858SDavid Matlack 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
10322839180cSPaolo Bonzini 					 fault->pfn, iter->old_spte, fault->prefetch, true,
10337158bee4SPaolo Bonzini 					 fault->map_writable, &new_spte);
1034bb18842eSBen Gardon 
1035bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
1036bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
10373e72c791SDavid Matlack 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
10389a77daacSBen Gardon 		return RET_PF_RETRY;
1039bb18842eSBen Gardon 
1040bb18842eSBen Gardon 	/*
1041bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
1042bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
1043bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
1044bb18842eSBen Gardon 	 */
1045ad67e480SPaolo Bonzini 	if (wrprot && fault->write)
1047bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
1049bb18842eSBen Gardon 
1050bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
10519a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
10529a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
10539a77daacSBen Gardon 				     new_spte);
1054bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
10553849e092SSean Christopherson 	} else {
10569a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
10579a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
10583849e092SSean Christopherson 	}
1059bb18842eSBen Gardon 
1060857f8474SKai Huang 	/*
1061857f8474SKai Huang 	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
1062857f8474SKai Huang 	 * consistent with legacy MMU behavior.
1063857f8474SKai Huang 	 */
1064857f8474SKai Huang 	if (ret != RET_PF_SPURIOUS)
1065bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
1066bb18842eSBen Gardon 
1067bb18842eSBen Gardon 	return ret;
1068bb18842eSBen Gardon }
1069bb18842eSBen Gardon 
1070bb18842eSBen Gardon /*
1071cb00a70bSDavid Matlack  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1072cb00a70bSDavid Matlack  * provided page table.
10737b7e1ab6SDavid Matlack  *
10747b7e1ab6SDavid Matlack  * @kvm: kvm instance
10757b7e1ab6SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
10767b7e1ab6SDavid Matlack  * @sp: The new TDP page table to install.
10777b7e1ab6SDavid Matlack  * @account_nx: True if this page table is being installed to split a
10787b7e1ab6SDavid Matlack  *              non-executable huge page.
1079cb00a70bSDavid Matlack  * @shared: This operation is running under the MMU lock in read mode.
10807b7e1ab6SDavid Matlack  *
10817b7e1ab6SDavid Matlack  * Returns: 0 if the new page table was installed. Non-0 if the page table
10827b7e1ab6SDavid Matlack  *          could not be installed (e.g. the atomic compare-exchange failed).
10837b7e1ab6SDavid Matlack  */
1084cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1085cb00a70bSDavid Matlack 			   struct kvm_mmu_page *sp, bool account_nx,
1086cb00a70bSDavid Matlack 			   bool shared)
10877b7e1ab6SDavid Matlack {
10887b7e1ab6SDavid Matlack 	u64 spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask);
1089cb00a70bSDavid Matlack 	int ret = 0;
10907b7e1ab6SDavid Matlack 
1091cb00a70bSDavid Matlack 	if (shared) {
10927b7e1ab6SDavid Matlack 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
10937b7e1ab6SDavid Matlack 		if (ret)
10947b7e1ab6SDavid Matlack 			return ret;
1095cb00a70bSDavid Matlack 	} else {
1096cb00a70bSDavid Matlack 		tdp_mmu_set_spte(kvm, iter, spte);
1097cb00a70bSDavid Matlack 	}
10987b7e1ab6SDavid Matlack 
10997b7e1ab6SDavid Matlack 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
11007b7e1ab6SDavid Matlack 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
11017b7e1ab6SDavid Matlack 	if (account_nx)
11027b7e1ab6SDavid Matlack 		account_huge_nx_page(kvm, sp);
11037b7e1ab6SDavid Matlack 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
11047b7e1ab6SDavid Matlack 
11057b7e1ab6SDavid Matlack 	return 0;
11067b7e1ab6SDavid Matlack }
11077b7e1ab6SDavid Matlack 
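/*
 * A minimal, self-contained sketch (not kernel code) of the shared-vs-
 * exclusive install pattern above, expressed with C11 atomics: under a
 * shared (read) lock the new entry may only be published if the slot still
 * holds the value last observed, while under an exclusive (write) lock a
 * plain store is safe.  install_entry() and the uint64_t slot layout are
 * assumptions for illustration only.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool install_entry(_Atomic uint64_t *slot, uint64_t old_val,
 *				  uint64_t new_val, bool shared)
 *	{
 *		if (shared)
 *			// fails if another walker changed the slot under us
 *			return atomic_compare_exchange_strong(slot, &old_val,
 *							      new_val);
 *
 *		atomic_store(slot, new_val);	// exclusive lock: no racing writers
 *		return true;
 *	}
 */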
11087b7e1ab6SDavid Matlack /*
1109bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1110bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
1111bb18842eSBen Gardon  */
11122f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1113bb18842eSBen Gardon {
1114bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1115bb18842eSBen Gardon 	struct tdp_iter iter;
111689c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1117bb18842eSBen Gardon 	int ret;
1118bb18842eSBen Gardon 
111973a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
1120bb18842eSBen Gardon 
1121f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
11227cca2d0bSBen Gardon 
11237cca2d0bSBen Gardon 	rcu_read_lock();
11247cca2d0bSBen Gardon 
11252f6305ddSPaolo Bonzini 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
112673a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
1127536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1128bb18842eSBen Gardon 
112973a3c659SPaolo Bonzini 		if (iter.level == fault->goal_level)
1130bb18842eSBen Gardon 			break;
1131bb18842eSBen Gardon 
1132bb18842eSBen Gardon 		/*
1133bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1134bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1135bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1136bb18842eSBen Gardon 		 */
1137bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1138bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
11393e72c791SDavid Matlack 			if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
11409a77daacSBen Gardon 				break;
1141bb18842eSBen Gardon 
1142bb18842eSBen Gardon 			/*
1143bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1144bb18842eSBen Gardon 			 * because the new value informs the !present
1145bb18842eSBen Gardon 			 * path below.
1146bb18842eSBen Gardon 			 */
11470e587aa7SSean Christopherson 			iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
1148bb18842eSBen Gardon 		}
1149bb18842eSBen Gardon 
1150bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
11517b7e1ab6SDavid Matlack 			bool account_nx = fault->huge_page_disallowed &&
11527b7e1ab6SDavid Matlack 					  fault->req_level >= iter.level;
11537b7e1ab6SDavid Matlack 
1154ff76d506SKai Huang 			/*
1155c4342633SIngo Molnar 			 * If the SPTE has been frozen by another thread, just
1156ff76d506SKai Huang 			 * give up and retry, avoiding unnecessary page table
1157ff76d506SKai Huang 			 * allocation and freeing.
1158ff76d506SKai Huang 			 */
1159ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1160ff76d506SKai Huang 				break;
1161ff76d506SKai Huang 
1162a82070b6SDavid Matlack 			sp = tdp_mmu_alloc_sp(vcpu);
1163a82070b6SDavid Matlack 			tdp_mmu_init_child_sp(sp, &iter);
1164a82070b6SDavid Matlack 
1165cb00a70bSDavid Matlack 			if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
11669a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
11679a77daacSBen Gardon 				break;
11689a77daacSBen Gardon 			}
1169bb18842eSBen Gardon 		}
1170bb18842eSBen Gardon 	}
1171bb18842eSBen Gardon 
117273a3c659SPaolo Bonzini 	if (iter.level != fault->goal_level) {
11737cca2d0bSBen Gardon 		rcu_read_unlock();
1174bb18842eSBen Gardon 		return RET_PF_RETRY;
11757cca2d0bSBen Gardon 	}
1176bb18842eSBen Gardon 
1177cdc47767SPaolo Bonzini 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
11787cca2d0bSBen Gardon 	rcu_read_unlock();
1179bb18842eSBen Gardon 
1180bb18842eSBen Gardon 	return ret;
1181bb18842eSBen Gardon }
1182063afacdSBen Gardon 
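/*
 * A minimal, self-contained user-space sketch (not kernel code) of the walk
 * performed by kvm_tdp_mmu_map(): descend one level at a time, allocating
 * and linking a child table whenever the current entry is not present, until
 * the leaf level is reached.  The 9-bit-per-level layout and all names
 * (walk_and_populate, ...) are assumptions for illustration only.
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *
 *	#define ENTRIES_PER_TABLE 512
 *	#define LEVELS 4
 *
 *	struct table {
 *		struct table *children[ENTRIES_PER_TABLE];
 *	};
 *
 *	static int index_for_level(uint64_t gfn, int level)
 *	{
 *		// 9 bits of the GFN select the entry at each level; leaf is level 1
 *		return (gfn >> (9 * (level - 1))) & (ENTRIES_PER_TABLE - 1);
 *	}
 *
 *	static struct table *walk_and_populate(struct table *root, uint64_t gfn)
 *	{
 *		struct table *table = root;
 *		int level, idx;
 *
 *		for (level = LEVELS; level > 1; level--) {
 *			idx = index_for_level(gfn, level);
 *			if (!table->children[idx])
 *				table->children[idx] = calloc(1, sizeof(*table));
 *			table = table->children[idx];	// error handling omitted
 *		}
 *		return table;	// leaf table; a real MMU would install the PTE here
 *	}
 */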
11833039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
11843039bcc7SSean Christopherson 				 bool flush)
1185063afacdSBen Gardon {
1186cf3e2642SSean Christopherson 	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
118783b83a02SSean Christopherson 				     range->end, range->may_block, flush);
11883039bcc7SSean Christopherson }
11893039bcc7SSean Christopherson 
11903039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
11913039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
11923039bcc7SSean Christopherson 
11933039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
11943039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1195c1b91493SSean Christopherson 						   tdp_handler_t handler)
1196063afacdSBen Gardon {
1197063afacdSBen Gardon 	struct kvm_mmu_page *root;
11983039bcc7SSean Christopherson 	struct tdp_iter iter;
11993039bcc7SSean Christopherson 	bool ret = false;
1200063afacdSBen Gardon 
1201063afacdSBen Gardon 	/*
1202e1eed584SSean Christopherson 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1203e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
1204063afacdSBen Gardon 	 */
12053039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1206a151acecSSean Christopherson 		rcu_read_lock();
1207a151acecSSean Christopherson 
12083039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
12093039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
1210063afacdSBen Gardon 
12113039bcc7SSean Christopherson 		rcu_read_unlock();
1212a151acecSSean Christopherson 	}
1213063afacdSBen Gardon 
1214063afacdSBen Gardon 	return ret;
1215063afacdSBen Gardon }
1216063afacdSBen Gardon 
1217f8e14497SBen Gardon /*
1218f8e14497SBen Gardon  * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1219f8e14497SBen Gardon  * true if any of the GFNs in the range have been accessed.
1220f8e14497SBen Gardon  */
12213039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
12223039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1223f8e14497SBen Gardon {
1224f8e14497SBen Gardon 	u64 new_spte = 0;
1225f8e14497SBen Gardon 
12263039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
12273039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
12283039bcc7SSean Christopherson 		return false;
12297cca2d0bSBen Gardon 
12303039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1231f8e14497SBen Gardon 
1232f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
12338f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1234f8e14497SBen Gardon 	} else {
1235f8e14497SBen Gardon 		/*
1236f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1237f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1238f8e14497SBen Gardon 		 */
1239f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1240f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1241f8e14497SBen Gardon 
1242f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1243f8e14497SBen Gardon 	}
1244f8e14497SBen Gardon 
12453039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
124633dd3574SBen Gardon 
12473039bcc7SSean Christopherson 	return true;
1248f8e14497SBen Gardon }
1249f8e14497SBen Gardon 
12503039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1251f8e14497SBen Gardon {
12523039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1253f8e14497SBen Gardon }
1254f8e14497SBen Gardon 
12553039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
12563039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1257f8e14497SBen Gardon {
12583039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1259f8e14497SBen Gardon }
1260f8e14497SBen Gardon 
12613039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1262f8e14497SBen Gardon {
12633039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
12643039bcc7SSean Christopherson }
12653039bcc7SSean Christopherson 
12663039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
12673039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
12683039bcc7SSean Christopherson {
12693039bcc7SSean Christopherson 	u64 new_spte;
12703039bcc7SSean Christopherson 
12713039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
12723039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
12733039bcc7SSean Christopherson 
12743039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
12753039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
12763039bcc7SSean Christopherson 		return false;
12773039bcc7SSean Christopherson 
12783039bcc7SSean Christopherson 	/*
12793039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
12803039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
12813039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
12823039bcc7SSean Christopherson 	 * See __handle_changed_spte().
12833039bcc7SSean Christopherson 	 */
12843039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
12853039bcc7SSean Christopherson 
12863039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
12873039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
12883039bcc7SSean Christopherson 								  pte_pfn(range->pte));
12893039bcc7SSean Christopherson 
12903039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
12913039bcc7SSean Christopherson 	}
12923039bcc7SSean Christopherson 
12933039bcc7SSean Christopherson 	return true;
1294f8e14497SBen Gardon }
12951d8dd6b3SBen Gardon 
12961d8dd6b3SBen Gardon /*
12971d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
12981d8dd6b3SBen Gardon  * range->pte holds the new pte_t mapping the HVA specified by the MMU
12991d8dd6b3SBen Gardon  * notifier.
13001d8dd6b3SBen Gardon  * Returns true if a TLB flush is needed before releasing the MMU lock.
13011d8dd6b3SBen Gardon  */
13023039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
13031d8dd6b3SBen Gardon {
130493fa50f6SSean Christopherson 	/*
130593fa50f6SSean Christopherson 	 * No need to handle the remote TLB flush under RCU protection, the
130693fa50f6SSean Christopherson 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
130793fa50f6SSean Christopherson 	 * shadow page.  See the WARN on pfn_changed in __handle_changed_spte().
130893fa50f6SSean Christopherson 	 */
130993fa50f6SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
13101d8dd6b3SBen Gardon }
13111d8dd6b3SBen Gardon 
1312a6a0b05dSBen Gardon /*
1313bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1314bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1315bedd9195SDavid Matlack  * be flushed.
1316a6a0b05dSBen Gardon  */
1317a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1318a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1319a6a0b05dSBen Gardon {
1320a6a0b05dSBen Gardon 	struct tdp_iter iter;
1321a6a0b05dSBen Gardon 	u64 new_spte;
1322a6a0b05dSBen Gardon 	bool spte_set = false;
1323a6a0b05dSBen Gardon 
13247cca2d0bSBen Gardon 	rcu_read_lock();
13257cca2d0bSBen Gardon 
1326a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1327a6a0b05dSBen Gardon 
132877aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
132924ae4cfaSBen Gardon retry:
133024ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
13311af4a960SBen Gardon 			continue;
13321af4a960SBen Gardon 
1333a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
13340f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
13350f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1336a6a0b05dSBen Gardon 			continue;
1337a6a0b05dSBen Gardon 
1338a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1339a6a0b05dSBen Gardon 
13403e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
134124ae4cfaSBen Gardon 			goto retry;
13423255530aSDavid Matlack 
1343a6a0b05dSBen Gardon 		spte_set = true;
1344a6a0b05dSBen Gardon 	}
13457cca2d0bSBen Gardon 
13467cca2d0bSBen Gardon 	rcu_read_unlock();
1347a6a0b05dSBen Gardon 	return spte_set;
1348a6a0b05dSBen Gardon }
1349a6a0b05dSBen Gardon 
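/*
 * A minimal, self-contained sketch (not kernel code) of the retry pattern
 * used above when a compare-exchange loses a race: observe the current
 * value, recompute the new value, and try again until the bit is cleared or
 * no longer needs to be.  The bit position and clear_writable() name are
 * assumptions for illustration only.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define WRITABLE_BIT (1ull << 1)
 *
 *	static bool clear_writable(_Atomic uint64_t *entry)
 *	{
 *		uint64_t old_val = atomic_load(entry);
 *
 *		while (old_val & WRITABLE_BIT) {
 *			// on failure, old_val is refreshed with the current value
 *			if (atomic_compare_exchange_weak(entry, &old_val,
 *							 old_val & ~WRITABLE_BIT))
 *				return true;	// we cleared the bit
 *		}
 *		return false;			// already read-only
 *	}
 */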
1350a6a0b05dSBen Gardon /*
1351a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1352a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1353a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1354a6a0b05dSBen Gardon  */
1355269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1356269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1357a6a0b05dSBen Gardon {
1358a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1359a6a0b05dSBen Gardon 	bool spte_set = false;
1360a6a0b05dSBen Gardon 
136124ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1362a6a0b05dSBen Gardon 
1363d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1364a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1365a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1366a6a0b05dSBen Gardon 
1367a6a0b05dSBen Gardon 	return spte_set;
1368a6a0b05dSBen Gardon }
1369a6a0b05dSBen Gardon 
1370a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1371a3fe5dbdSDavid Matlack {
1372a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1373a3fe5dbdSDavid Matlack 
1374a3fe5dbdSDavid Matlack 	gfp |= __GFP_ZERO;
1375a3fe5dbdSDavid Matlack 
1376a3fe5dbdSDavid Matlack 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1377a3fe5dbdSDavid Matlack 	if (!sp)
1378a3fe5dbdSDavid Matlack 		return NULL;
1379a3fe5dbdSDavid Matlack 
1380a3fe5dbdSDavid Matlack 	sp->spt = (void *)__get_free_page(gfp);
1381a3fe5dbdSDavid Matlack 	if (!sp->spt) {
1382a3fe5dbdSDavid Matlack 		kmem_cache_free(mmu_page_header_cache, sp);
1383a3fe5dbdSDavid Matlack 		return NULL;
1384a3fe5dbdSDavid Matlack 	}
1385a3fe5dbdSDavid Matlack 
1386a3fe5dbdSDavid Matlack 	return sp;
1387a3fe5dbdSDavid Matlack }
1388a3fe5dbdSDavid Matlack 
1389a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1390cb00a70bSDavid Matlack 						       struct tdp_iter *iter,
1391cb00a70bSDavid Matlack 						       bool shared)
1392a3fe5dbdSDavid Matlack {
1393a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1394a3fe5dbdSDavid Matlack 
1395a3fe5dbdSDavid Matlack 	/*
1396a3fe5dbdSDavid Matlack 	 * Since we are allocating while under the MMU lock we have to be
1397a3fe5dbdSDavid Matlack 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1398a3fe5dbdSDavid Matlack 	 * reclaim and to avoid making any filesystem callbacks (which can end
1399a3fe5dbdSDavid Matlack 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1400a3fe5dbdSDavid Matlack 	 *
1401a3fe5dbdSDavid Matlack 	 * If this allocation fails we drop the lock and retry with reclaim
1402a3fe5dbdSDavid Matlack 	 * allowed.
1403a3fe5dbdSDavid Matlack 	 */
1404a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1405a3fe5dbdSDavid Matlack 	if (sp)
1406a3fe5dbdSDavid Matlack 		return sp;
1407a3fe5dbdSDavid Matlack 
1408a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1409cb00a70bSDavid Matlack 
1410cb00a70bSDavid Matlack 	if (shared)
1411a3fe5dbdSDavid Matlack 		read_unlock(&kvm->mmu_lock);
1412cb00a70bSDavid Matlack 	else
1413cb00a70bSDavid Matlack 		write_unlock(&kvm->mmu_lock);
1414a3fe5dbdSDavid Matlack 
1415a3fe5dbdSDavid Matlack 	iter->yielded = true;
1416a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1417a3fe5dbdSDavid Matlack 
1418cb00a70bSDavid Matlack 	if (shared)
1419a3fe5dbdSDavid Matlack 		read_lock(&kvm->mmu_lock);
1420cb00a70bSDavid Matlack 	else
1421cb00a70bSDavid Matlack 		write_lock(&kvm->mmu_lock);
1422cb00a70bSDavid Matlack 
1423a3fe5dbdSDavid Matlack 	rcu_read_lock();
1424a3fe5dbdSDavid Matlack 
1425a3fe5dbdSDavid Matlack 	return sp;
1426a3fe5dbdSDavid Matlack }
1427a3fe5dbdSDavid Matlack 
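/*
 * A minimal, self-contained sketch (not kernel code) of the allocation dance
 * above: try a never-blocking fast path first, and only if that fails drop
 * the lock so the blocking slow path cannot stall other walkers, then tell
 * the caller its walk state is stale.  Here a per-thread cache stands in for
 * the GFP_NOWAIT attempt; the cache and alloc_obj() are assumptions for
 * illustration only.
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *	#include <stdlib.h>
 *
 *	struct obj_cache {
 *		void *slots[64];		// per-thread, so no locking needed
 *		int nr;
 *	};
 *
 *	static void *alloc_obj(pthread_rwlock_t *lock, struct obj_cache *cache,
 *			       size_t size, bool *yielded)
 *	{
 *		void *obj;
 *
 *		if (cache->nr > 0)			// fast path, never blocks
 *			return cache->slots[--cache->nr];
 *
 *		pthread_rwlock_unlock(lock);		// slow path may block
 *		obj = malloc(size);
 *		pthread_rwlock_rdlock(lock);
 *
 *		*yielded = true;			// caller must restart its walk
 *		return obj;
 *	}
 */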
1428cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1429cb00a70bSDavid Matlack 				   struct kvm_mmu_page *sp, bool shared)
1430a3fe5dbdSDavid Matlack {
1431a3fe5dbdSDavid Matlack 	const u64 huge_spte = iter->old_spte;
1432a3fe5dbdSDavid Matlack 	const int level = iter->level;
1433a3fe5dbdSDavid Matlack 	int ret, i;
1434a3fe5dbdSDavid Matlack 
1435a3fe5dbdSDavid Matlack 	tdp_mmu_init_child_sp(sp, iter);
1436a3fe5dbdSDavid Matlack 
1437a3fe5dbdSDavid Matlack 	/*
1438a3fe5dbdSDavid Matlack 	 * No need for atomics when writing to sp->spt since the page table has
1439a3fe5dbdSDavid Matlack 	 * not been linked in yet and thus is not reachable from any other CPU.
1440a3fe5dbdSDavid Matlack 	 */
1441a3fe5dbdSDavid Matlack 	for (i = 0; i < PT64_ENT_PER_PAGE; i++)
1442a3fe5dbdSDavid Matlack 		sp->spt[i] = make_huge_page_split_spte(huge_spte, level, i);
1443a3fe5dbdSDavid Matlack 
1444a3fe5dbdSDavid Matlack 	/*
1445a3fe5dbdSDavid Matlack 	 * Replace the huge spte with a pointer to the populated lower level
1446a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush vCPUs
1447a3fe5dbdSDavid Matlack 	 * will see a mix of the split mappings and the original huge mapping,
1448a3fe5dbdSDavid Matlack 	 * depending on what's currently in their TLB. This is fine from a
1449a3fe5dbdSDavid Matlack 	 * correctness standpoint since the translation will be the same either
1450a3fe5dbdSDavid Matlack 	 * way.
1451a3fe5dbdSDavid Matlack 	 */
1452cb00a70bSDavid Matlack 	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
1453a3fe5dbdSDavid Matlack 	if (ret)
1454e0b728b1SDavid Matlack 		goto out;
1455a3fe5dbdSDavid Matlack 
1456a3fe5dbdSDavid Matlack 	/*
1457a3fe5dbdSDavid Matlack 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1458a3fe5dbdSDavid Matlack 	 * are overwriting from the page stats. But we have to manually update
1459a3fe5dbdSDavid Matlack 	 * the page stats with the new present child pages.
1460a3fe5dbdSDavid Matlack 	 */
1461a3fe5dbdSDavid Matlack 	kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
1462a3fe5dbdSDavid Matlack 
1463e0b728b1SDavid Matlack out:
1464e0b728b1SDavid Matlack 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1465e0b728b1SDavid Matlack 	return ret;
1466a3fe5dbdSDavid Matlack }
1467a3fe5dbdSDavid Matlack 
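/*
 * A minimal, self-contained sketch (not kernel code) of the fan-out
 * arithmetic above: child i of a split huge mapping covers a consecutive
 * slice of the parent range, i.e. it starts at huge_pfn + i * pages_per_child.
 * make_child_entry() is a hypothetical stand-in for make_huge_page_split_spte(),
 * with a toy encoding and no flag handling.
 *
 *	#include <stdint.h>
 *
 *	#define ENTRIES_PER_TABLE 512
 *
 *	static uint64_t make_child_entry(uint64_t pfn)
 *	{
 *		return pfn << 12;	// toy encoding: PFN only, no flags
 *	}
 *
 *	static void split_huge_entry(uint64_t *child_table, uint64_t huge_pfn,
 *				     uint64_t pages_per_child)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ENTRIES_PER_TABLE; i++)
 *			child_table[i] = make_child_entry(huge_pfn +
 *							  i * pages_per_child);
 *	}
 */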
1468a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1469a3fe5dbdSDavid Matlack 					 struct kvm_mmu_page *root,
1470a3fe5dbdSDavid Matlack 					 gfn_t start, gfn_t end,
1471cb00a70bSDavid Matlack 					 int target_level, bool shared)
1472a3fe5dbdSDavid Matlack {
1473a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp = NULL;
1474a3fe5dbdSDavid Matlack 	struct tdp_iter iter;
1475a3fe5dbdSDavid Matlack 	int ret = 0;
1476a3fe5dbdSDavid Matlack 
1477a3fe5dbdSDavid Matlack 	rcu_read_lock();
1478a3fe5dbdSDavid Matlack 
1479a3fe5dbdSDavid Matlack 	/*
1480a3fe5dbdSDavid Matlack 	 * Traverse the page table splitting all huge pages above the target
1481a3fe5dbdSDavid Matlack 	 * level into one lower level. For example, if we encounter a 1GB page
1482a3fe5dbdSDavid Matlack 	 * we split it into 512 2MB pages.
1483a3fe5dbdSDavid Matlack 	 *
1484a3fe5dbdSDavid Matlack 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1485a3fe5dbdSDavid Matlack 	 * to visit an SPTE before ever visiting its children, which means we
1486a3fe5dbdSDavid Matlack 	 * will correctly recursively split huge pages that are more than one
1487a3fe5dbdSDavid Matlack 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1488a3fe5dbdSDavid Matlack 	 * and then splitting each of those to 512 4KB pages).
1489a3fe5dbdSDavid Matlack 	 */
1490a3fe5dbdSDavid Matlack 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1491a3fe5dbdSDavid Matlack retry:
1492cb00a70bSDavid Matlack 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1493a3fe5dbdSDavid Matlack 			continue;
1494a3fe5dbdSDavid Matlack 
1495a3fe5dbdSDavid Matlack 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1496a3fe5dbdSDavid Matlack 			continue;
1497a3fe5dbdSDavid Matlack 
1498a3fe5dbdSDavid Matlack 		if (!sp) {
1499cb00a70bSDavid Matlack 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1500a3fe5dbdSDavid Matlack 			if (!sp) {
1501a3fe5dbdSDavid Matlack 				ret = -ENOMEM;
1502e0b728b1SDavid Matlack 				trace_kvm_mmu_split_huge_page(iter.gfn,
1503e0b728b1SDavid Matlack 							      iter.old_spte,
1504e0b728b1SDavid Matlack 							      iter.level, ret);
1505a3fe5dbdSDavid Matlack 				break;
1506a3fe5dbdSDavid Matlack 			}
1507a3fe5dbdSDavid Matlack 
1508a3fe5dbdSDavid Matlack 			if (iter.yielded)
1509a3fe5dbdSDavid Matlack 				continue;
1510a3fe5dbdSDavid Matlack 		}
1511a3fe5dbdSDavid Matlack 
1512cb00a70bSDavid Matlack 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1513a3fe5dbdSDavid Matlack 			goto retry;
1514a3fe5dbdSDavid Matlack 
1515a3fe5dbdSDavid Matlack 		sp = NULL;
1516a3fe5dbdSDavid Matlack 	}
1517a3fe5dbdSDavid Matlack 
1518a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1519a3fe5dbdSDavid Matlack 
1520a3fe5dbdSDavid Matlack 	/*
1521a3fe5dbdSDavid Matlack 	 * It's possible to exit the loop having never used the last sp if, for
1522a3fe5dbdSDavid Matlack 	 * example, a vCPU doing HugePage NX splitting wins the race and
1523a3fe5dbdSDavid Matlack 	 * installs its own sp in place of the last sp we tried to split.
1524a3fe5dbdSDavid Matlack 	 */
1525a3fe5dbdSDavid Matlack 	if (sp)
1526a3fe5dbdSDavid Matlack 		tdp_mmu_free_sp(sp);
1527a3fe5dbdSDavid Matlack 
1528a3fe5dbdSDavid Matlack 	return ret;
1529a3fe5dbdSDavid Matlack }
1530a3fe5dbdSDavid Matlack 
1531cb00a70bSDavid Matlack 
1532a3fe5dbdSDavid Matlack /*
1533a3fe5dbdSDavid Matlack  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1534a3fe5dbdSDavid Matlack  */
1535a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1536a3fe5dbdSDavid Matlack 				      const struct kvm_memory_slot *slot,
1537a3fe5dbdSDavid Matlack 				      gfn_t start, gfn_t end,
1538cb00a70bSDavid Matlack 				      int target_level, bool shared)
1539a3fe5dbdSDavid Matlack {
1540a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *root;
1541a3fe5dbdSDavid Matlack 	int r = 0;
1542a3fe5dbdSDavid Matlack 
1543cb00a70bSDavid Matlack 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1544a3fe5dbdSDavid Matlack 
15457c554d8eSPaolo Bonzini 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1546cb00a70bSDavid Matlack 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1547a3fe5dbdSDavid Matlack 		if (r) {
1548cb00a70bSDavid Matlack 			kvm_tdp_mmu_put_root(kvm, root, shared);
1549a3fe5dbdSDavid Matlack 			break;
1550a3fe5dbdSDavid Matlack 		}
1551a3fe5dbdSDavid Matlack 	}
1552a3fe5dbdSDavid Matlack }
1553a3fe5dbdSDavid Matlack 
1554a6a0b05dSBen Gardon /*
1555a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1556a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1557a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1558a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1559a6a0b05dSBen Gardon  * be flushed.
1560a6a0b05dSBen Gardon  */
1561a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1562a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1563a6a0b05dSBen Gardon {
1564a6a0b05dSBen Gardon 	struct tdp_iter iter;
1565a6a0b05dSBen Gardon 	u64 new_spte;
1566a6a0b05dSBen Gardon 	bool spte_set = false;
1567a6a0b05dSBen Gardon 
15687cca2d0bSBen Gardon 	rcu_read_lock();
15697cca2d0bSBen Gardon 
1570a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
157124ae4cfaSBen Gardon retry:
157224ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
15731af4a960SBen Gardon 			continue;
15741af4a960SBen Gardon 
15753354ef5aSSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte))
15763354ef5aSSean Christopherson 			continue;
15773354ef5aSSean Christopherson 
1578a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1579a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1580a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1581a6a0b05dSBen Gardon 			else
1582a6a0b05dSBen Gardon 				continue;
1583a6a0b05dSBen Gardon 		} else {
1584a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1585a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1586a6a0b05dSBen Gardon 			else
1587a6a0b05dSBen Gardon 				continue;
1588a6a0b05dSBen Gardon 		}
1589a6a0b05dSBen Gardon 
15903e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
159124ae4cfaSBen Gardon 			goto retry;
15923255530aSDavid Matlack 
1593a6a0b05dSBen Gardon 		spte_set = true;
1594a6a0b05dSBen Gardon 	}
15957cca2d0bSBen Gardon 
15967cca2d0bSBen Gardon 	rcu_read_unlock();
1597a6a0b05dSBen Gardon 	return spte_set;
1598a6a0b05dSBen Gardon }
1599a6a0b05dSBen Gardon 
1600a6a0b05dSBen Gardon /*
1601a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1602a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1603a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1604a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1605a6a0b05dSBen Gardon  * be flushed.
1606a6a0b05dSBen Gardon  */
1607269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1608269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1609a6a0b05dSBen Gardon {
1610a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1611a6a0b05dSBen Gardon 	bool spte_set = false;
1612a6a0b05dSBen Gardon 
161324ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1614a6a0b05dSBen Gardon 
1615d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1616a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1617a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1618a6a0b05dSBen Gardon 
1619a6a0b05dSBen Gardon 	return spte_set;
1620a6a0b05dSBen Gardon }
1621a6a0b05dSBen Gardon 
1622a6a0b05dSBen Gardon /*
1623a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1624a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1625a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1626a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1627a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1628a6a0b05dSBen Gardon  */
1629a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1630a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1631a6a0b05dSBen Gardon {
1632a6a0b05dSBen Gardon 	struct tdp_iter iter;
1633a6a0b05dSBen Gardon 	u64 new_spte;
1634a6a0b05dSBen Gardon 
16357cca2d0bSBen Gardon 	rcu_read_lock();
16367cca2d0bSBen Gardon 
1637a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1638a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1639a6a0b05dSBen Gardon 		if (!mask)
1640a6a0b05dSBen Gardon 			break;
1641a6a0b05dSBen Gardon 
1642a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1643a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1644a6a0b05dSBen Gardon 			continue;
1645a6a0b05dSBen Gardon 
1646f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1647f1b3b06aSBen Gardon 
1648a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1649a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1650a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1651a6a0b05dSBen Gardon 			else
1652a6a0b05dSBen Gardon 				continue;
1653a6a0b05dSBen Gardon 		} else {
1654a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1655a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1656a6a0b05dSBen Gardon 			else
1657a6a0b05dSBen Gardon 				continue;
1658a6a0b05dSBen Gardon 		}
1659a6a0b05dSBen Gardon 
1660a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1661a6a0b05dSBen Gardon 	}
16627cca2d0bSBen Gardon 
16637cca2d0bSBen Gardon 	rcu_read_unlock();
1664a6a0b05dSBen Gardon }
1665a6a0b05dSBen Gardon 
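/*
 * A minimal, self-contained sketch (not kernel code) of consuming a
 * dirty-GFN bitmask like the loop above does: each set bit selects
 * base_gfn + bit, and the walk can stop as soon as the mask is empty.
 * for_each_masked_gfn() is a hypothetical name, and __builtin_ctzl() is a
 * GCC/Clang builtin.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void for_each_masked_gfn(uint64_t base_gfn, unsigned long mask)
 *	{
 *		while (mask) {
 *			int bit = __builtin_ctzl(mask);	// lowest set bit
 *
 *			printf("clear dirty state for gfn 0x%llx\n",
 *			       (unsigned long long)(base_gfn + bit));
 *
 *			mask &= mask - 1;		// clear that bit
 *		}
 *	}
 */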
1666a6a0b05dSBen Gardon /*
1667a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1668a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1669a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1670a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1671a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1672a6a0b05dSBen Gardon  */
1673a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1674a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1675a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1676a6a0b05dSBen Gardon 				       bool wrprot)
1677a6a0b05dSBen Gardon {
1678a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1679a6a0b05dSBen Gardon 
1680531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1681a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1682a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1683a6a0b05dSBen Gardon }
1684a6a0b05dSBen Gardon 
1685a6a0b05dSBen Gardon /*
168687aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
168787aa9ec9SBen Gardon  * GFNs within the slot.
168814881998SBen Gardon  */
16894b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
169014881998SBen Gardon 				       struct kvm_mmu_page *root,
16914b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
169214881998SBen Gardon {
16939eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
16949eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
169514881998SBen Gardon 	struct tdp_iter iter;
169614881998SBen Gardon 	kvm_pfn_t pfn;
169714881998SBen Gardon 
16987cca2d0bSBen Gardon 	rcu_read_lock();
16997cca2d0bSBen Gardon 
170014881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
17012db6f772SBen Gardon retry:
17024b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
17031af4a960SBen Gardon 			continue;
17041af4a960SBen Gardon 
170514881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
170687aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
170714881998SBen Gardon 			continue;
170814881998SBen Gardon 
170914881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
171014881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
17119eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
17129eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
171314881998SBen Gardon 			continue;
171414881998SBen Gardon 
17154b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
17163e72c791SDavid Matlack 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
17172db6f772SBen Gardon 			goto retry;
17182db6f772SBen Gardon 	}
171914881998SBen Gardon 
17207cca2d0bSBen Gardon 	rcu_read_unlock();
172114881998SBen Gardon }
172214881998SBen Gardon 
172314881998SBen Gardon /*
172414881998SBen Gardon  * Clear non-leaf entries (and free associated page tables) which could
172514881998SBen Gardon  * be replaced by large mappings, for GFNs within the slot.
172614881998SBen Gardon  */
17274b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
17284b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
172914881998SBen Gardon {
173014881998SBen Gardon 	struct kvm_mmu_page *root;
173114881998SBen Gardon 
17322db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
173314881998SBen Gardon 
1734d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
17354b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
173614881998SBen Gardon }
173746044f72SBen Gardon 
173846044f72SBen Gardon /*
173946044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
17405fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
174146044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
174246044f72SBen Gardon  */
174346044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
17443ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
174546044f72SBen Gardon {
174646044f72SBen Gardon 	struct tdp_iter iter;
174746044f72SBen Gardon 	u64 new_spte;
174846044f72SBen Gardon 	bool spte_set = false;
174946044f72SBen Gardon 
17503ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
17513ad93562SKeqian Zhu 
17527cca2d0bSBen Gardon 	rcu_read_lock();
17537cca2d0bSBen Gardon 
175477aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
17553ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
17563ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
17573ad93562SKeqian Zhu 			continue;
17583ad93562SKeqian Zhu 
175946044f72SBen Gardon 		new_spte = iter.old_spte &
17605fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
176146044f72SBen Gardon 
17627c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
17637c8a4742SDavid Matlack 			break;
17647c8a4742SDavid Matlack 
176546044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
176646044f72SBen Gardon 		spte_set = true;
176746044f72SBen Gardon 	}
176846044f72SBen Gardon 
17697cca2d0bSBen Gardon 	rcu_read_unlock();
17707cca2d0bSBen Gardon 
177146044f72SBen Gardon 	return spte_set;
177246044f72SBen Gardon }
177346044f72SBen Gardon 
177446044f72SBen Gardon /*
177546044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
17765fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
177746044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
177846044f72SBen Gardon  */
177946044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
17803ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
17813ad93562SKeqian Zhu 				   int min_level)
178246044f72SBen Gardon {
178346044f72SBen Gardon 	struct kvm_mmu_page *root;
178446044f72SBen Gardon 	bool spte_set = false;
178546044f72SBen Gardon 
1786531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1787a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
17883ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1789a3f15bdaSSean Christopherson 
179046044f72SBen Gardon 	return spte_set;
179146044f72SBen Gardon }
179246044f72SBen Gardon 
179395fb5b02SBen Gardon /*
179495fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
179595fb5b02SBen Gardon  * That SPTE may be non-present.
1796c5c8c7c5SDavid Matlack  *
1797c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
179895fb5b02SBen Gardon  */
179939b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
180039b4d43eSSean Christopherson 			 int *root_level)
180195fb5b02SBen Gardon {
180295fb5b02SBen Gardon 	struct tdp_iter iter;
180395fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
180495fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
18052aa07893SSean Christopherson 	int leaf = -1;
180695fb5b02SBen Gardon 
180739b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
180895fb5b02SBen Gardon 
180995fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
181095fb5b02SBen Gardon 		leaf = iter.level;
1811dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
181295fb5b02SBen Gardon 	}
181395fb5b02SBen Gardon 
181495fb5b02SBen Gardon 	return leaf;
181595fb5b02SBen Gardon }
18166e8eb206SDavid Matlack 
18176e8eb206SDavid Matlack /*
18186e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
18196e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
18206e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
18216e8eb206SDavid Matlack  *
18226e8eb206SDavid Matlack  * Contract:
18236e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
18246e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
18256e8eb206SDavid Matlack  *
18266e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
18276e8eb206SDavid Matlack  */
18286e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
18296e8eb206SDavid Matlack 					u64 *spte)
18306e8eb206SDavid Matlack {
18316e8eb206SDavid Matlack 	struct tdp_iter iter;
18326e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
18336e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
18346e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
18356e8eb206SDavid Matlack 
18366e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
18376e8eb206SDavid Matlack 		*spte = iter.old_spte;
18386e8eb206SDavid Matlack 		sptep = iter.sptep;
18396e8eb206SDavid Matlack 	}
18406e8eb206SDavid Matlack 
18416e8eb206SDavid Matlack 	/*
18426e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
18436e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
18446e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
18456e8eb206SDavid Matlack 	 * annotation.
18466e8eb206SDavid Matlack 	 *
18476e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
18486e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
18496e8eb206SDavid Matlack 	 * outside of mmu_lock.
18506e8eb206SDavid Matlack 	 */
18516e8eb206SDavid Matlack 	return rcu_dereference(sptep);
18526e8eb206SDavid Matlack }
1853