xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 55c510e2)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
1371ba3f31SPaolo Bonzini static bool __read_mostly tdp_mmu_enabled = true;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
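/*
 * Editor's note (illustrative; not part of the upstream source): because the
 * parameter is declared via module_param_named() in the kvm module, it is
 * exposed as /sys/module/kvm/parameters/tdp_mmu and can be set at boot with
 * "kvm.tdp_mmu=N".  kvm_mmu_init_tdp_mmu() below samples it once per VM with
 * READ_ONCE(), so changing it at runtime only affects VMs created afterwards.
 */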
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17a1a39128SPaolo Bonzini int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19a1a39128SPaolo Bonzini 	struct workqueue_struct *wq;
20a1a39128SPaolo Bonzini 
21897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
22a1a39128SPaolo Bonzini 		return 0;
23a1a39128SPaolo Bonzini 
24a1a39128SPaolo Bonzini 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
25a1a39128SPaolo Bonzini 	if (!wq)
26a1a39128SPaolo Bonzini 		return -ENOMEM;
27fe5db27dSBen Gardon 
28fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
29fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
3002c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
319a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
3289c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
33a1a39128SPaolo Bonzini 	kvm->arch.tdp_mmu_zap_wq = wq;
34a1a39128SPaolo Bonzini 	return 1;
35fe5db27dSBen Gardon }
36fe5db27dSBen Gardon 
37226b8c8fSSean Christopherson /* Arbitrarily returns true so that this may be used in if statements. */
38226b8c8fSSean Christopherson static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
396103bc07SBen Gardon 							     bool shared)
406103bc07SBen Gardon {
416103bc07SBen Gardon 	if (shared)
426103bc07SBen Gardon 		lockdep_assert_held_read(&kvm->mmu_lock);
436103bc07SBen Gardon 	else
446103bc07SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
45226b8c8fSSean Christopherson 
46226b8c8fSSean Christopherson 	return true;
476103bc07SBen Gardon }
486103bc07SBen Gardon 
49fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
50fe5db27dSBen Gardon {
51fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
52fe5db27dSBen Gardon 		return;
5302c00b3aSBen Gardon 
543203a56aSLv Ruyi 	/* Also waits for any queued work items.  */
5522b94c4bSPaolo Bonzini 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
5622b94c4bSPaolo Bonzini 
57524a1e4eSSean Christopherson 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
5802c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
597cca2d0bSBen Gardon 
607cca2d0bSBen Gardon 	/*
617cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
6222b94c4bSPaolo Bonzini 	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
6322b94c4bSPaolo Bonzini 	 * can call kvm_tdp_mmu_put_root and create new callbacks.
647cca2d0bSBen Gardon 	 */
657cca2d0bSBen Gardon 	rcu_barrier();
6602c00b3aSBen Gardon }
6702c00b3aSBen Gardon 
682bdb3d84SBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
69a889ea54SBen Gardon {
702bdb3d84SBen Gardon 	free_page((unsigned long)sp->spt);
712bdb3d84SBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
72a889ea54SBen Gardon }
73a889ea54SBen Gardon 
74c0e64238SBen Gardon /*
75c0e64238SBen Gardon  * This is called through call_rcu in order to free TDP page table memory
76c0e64238SBen Gardon  * safely with respect to other kernel threads that may be operating on
77c0e64238SBen Gardon  * the memory.
78c0e64238SBen Gardon  * By only accessing TDP MMU page table memory in an RCU read critical
79c0e64238SBen Gardon  * section, and freeing it only after a grace period, lockless readers are
80c0e64238SBen Gardon  * guaranteed not to touch the memory after it is freed.
81c0e64238SBen Gardon  */
82c0e64238SBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
83a889ea54SBen Gardon {
84c0e64238SBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
85c0e64238SBen Gardon 					       rcu_head);
86a889ea54SBen Gardon 
87c0e64238SBen Gardon 	tdp_mmu_free_sp(sp);
88a889ea54SBen Gardon }
89a889ea54SBen Gardon 
90e2b5b21dSSean Christopherson static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
91e2b5b21dSSean Christopherson 			     bool shared);
92e2b5b21dSSean Christopherson 
9322b94c4bSPaolo Bonzini static void tdp_mmu_zap_root_work(struct work_struct *work)
9422b94c4bSPaolo Bonzini {
9522b94c4bSPaolo Bonzini 	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
9622b94c4bSPaolo Bonzini 						 tdp_mmu_async_work);
9722b94c4bSPaolo Bonzini 	struct kvm *kvm = root->tdp_mmu_async_data;
9822b94c4bSPaolo Bonzini 
9922b94c4bSPaolo Bonzini 	read_lock(&kvm->mmu_lock);
10022b94c4bSPaolo Bonzini 
10122b94c4bSPaolo Bonzini 	/*
10222b94c4bSPaolo Bonzini 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
10322b94c4bSPaolo Bonzini 	 * allocating a new root (see kvm_mmu_load()), and when migrating a vCPU
10422b94c4bSPaolo Bonzini 	 * to a different pCPU.  Note, the local TLB flush on reuse also
10522b94c4bSPaolo Bonzini 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
10622b94c4bSPaolo Bonzini 	 * intermediate paging structures, that may be zapped, as such entries
10722b94c4bSPaolo Bonzini 	 * are associated with the ASID on both VMX and SVM.
10822b94c4bSPaolo Bonzini 	 */
10922b94c4bSPaolo Bonzini 	tdp_mmu_zap_root(kvm, root, true);
11022b94c4bSPaolo Bonzini 
11122b94c4bSPaolo Bonzini 	/*
11222b94c4bSPaolo Bonzini 	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
11322b94c4bSPaolo Bonzini 	 * avoiding an infinite loop.  By design, the root is reachable while
11422b94c4bSPaolo Bonzini 	 * it's being asynchronously zapped, thus a different task can put its
11522b94c4bSPaolo Bonzini 	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
11622b94c4bSPaolo Bonzini 	 * asynchronously zapped root is unavoidable.
11722b94c4bSPaolo Bonzini 	 */
11822b94c4bSPaolo Bonzini 	kvm_tdp_mmu_put_root(kvm, root, true);
11922b94c4bSPaolo Bonzini 
12022b94c4bSPaolo Bonzini 	read_unlock(&kvm->mmu_lock);
12122b94c4bSPaolo Bonzini }
12222b94c4bSPaolo Bonzini 
12322b94c4bSPaolo Bonzini static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
12422b94c4bSPaolo Bonzini {
12522b94c4bSPaolo Bonzini 	root->tdp_mmu_async_data = kvm;
12622b94c4bSPaolo Bonzini 	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
12722b94c4bSPaolo Bonzini 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
12822b94c4bSPaolo Bonzini }
12922b94c4bSPaolo Bonzini 
1308351779cSPaolo Bonzini static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
1318351779cSPaolo Bonzini {
1328351779cSPaolo Bonzini 	union kvm_mmu_page_role role = page->role;
1338351779cSPaolo Bonzini 	role.invalid = true;
1348351779cSPaolo Bonzini 
1358351779cSPaolo Bonzini 	/* No need to use cmpxchg, only the invalid bit can change.  */
1368351779cSPaolo Bonzini 	role.word = xchg(&page->role.word, role.word);
1378351779cSPaolo Bonzini 	return role.invalid;
1388351779cSPaolo Bonzini }
1398351779cSPaolo Bonzini 
1406103bc07SBen Gardon void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
1416103bc07SBen Gardon 			  bool shared)
1422bdb3d84SBen Gardon {
1436103bc07SBen Gardon 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1442bdb3d84SBen Gardon 
14511cccf5cSBen Gardon 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
1462bdb3d84SBen Gardon 		return;
1472bdb3d84SBen Gardon 
1482bdb3d84SBen Gardon 	WARN_ON(!root->tdp_mmu_page);
1492bdb3d84SBen Gardon 
1508351779cSPaolo Bonzini 	/*
1518351779cSPaolo Bonzini 	 * The root now has refcount=0.  It is valid, but readers already
1528351779cSPaolo Bonzini 	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
1538351779cSPaolo Bonzini 	 * rejects it.  This remains true for the rest of the execution
1548351779cSPaolo Bonzini 	 * of this function, because readers visit valid roots only
1558351779cSPaolo Bonzini 	 * (except for tdp_mmu_zap_root_work(), which however
1568351779cSPaolo Bonzini 	 * does not acquire any reference itself).
1578351779cSPaolo Bonzini 	 *
1588351779cSPaolo Bonzini 	 * Even though there are flows that need to visit all roots for
1598351779cSPaolo Bonzini 	 * correctness, they all take mmu_lock for write, so they cannot yet
1608351779cSPaolo Bonzini 	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
1618351779cSPaolo Bonzini 	 * since the root still has refcount=0.
1628351779cSPaolo Bonzini 	 *
1638351779cSPaolo Bonzini 	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
1648351779cSPaolo Bonzini 	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
1658351779cSPaolo Bonzini 	 * So the root temporarily gets an extra reference, going to refcount=1
1668351779cSPaolo Bonzini 	 * while staying invalid.  Readers still cannot acquire any reference;
1678351779cSPaolo Bonzini 	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
168efd995daSPaolo Bonzini 	 * they might take an extra reference if they themselves yield.
169efd995daSPaolo Bonzini 	 * Therefore, when the reference is given back by the worker,
1708351779cSPaolo Bonzini 	 * there is no guarantee that the refcount is still 1.  If not, whoever
1718351779cSPaolo Bonzini 	 * puts the last reference will free the page, but they will not have to
1728351779cSPaolo Bonzini 	 * zap the root because a root cannot go from invalid to valid.
1738351779cSPaolo Bonzini 	 */
1748351779cSPaolo Bonzini 	if (!kvm_tdp_root_mark_invalid(root)) {
1758351779cSPaolo Bonzini 		refcount_set(&root->tdp_mmu_root_count, 1);
1768351779cSPaolo Bonzini 
1778351779cSPaolo Bonzini 		/*
178efd995daSPaolo Bonzini 		 * Zapping the root in a worker is not just "nice to have";
179efd995daSPaolo Bonzini 		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
180efd995daSPaolo Bonzini 		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
181efd995daSPaolo Bonzini 		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
182efd995daSPaolo Bonzini 		 * might return with some roots not zapped yet.
1838351779cSPaolo Bonzini 		 */
184efd995daSPaolo Bonzini 		tdp_mmu_schedule_zap_root(kvm, root);
1858351779cSPaolo Bonzini 		return;
1868351779cSPaolo Bonzini 	}
1878351779cSPaolo Bonzini 
188c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
189c0e64238SBen Gardon 	list_del_rcu(&root->link);
190c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
191c0e64238SBen Gardon 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
192a889ea54SBen Gardon }
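/*
 * Editor's summary (sketch derived from the code and comments above; not part
 * of the upstream source) of the last-reference path for a still-valid root:
 *
 *	refcount_dec_and_test()         refcount hits 0
 *	kvm_tdp_root_mark_invalid()     returns false (root was valid)
 *	refcount_set(..., 1)            keep the root visible to writers
 *	tdp_mmu_schedule_zap_root()     worker zaps, then drops its reference
 *	...the last put sees an already-invalid root...
 *	list_del_rcu() + call_rcu(tdp_mmu_free_sp_rcu_callback)
 */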
193a889ea54SBen Gardon 
194cfc10997SBen Gardon /*
195d62007edSSean Christopherson  * Returns the next root after @prev_root (or the first root if @prev_root is
196d62007edSSean Christopherson  * NULL).  A reference to the returned root is acquired, and the reference to
197d62007edSSean Christopherson  * @prev_root is released (the caller obviously must hold a reference to
198d62007edSSean Christopherson  * @prev_root if it's non-NULL).
199d62007edSSean Christopherson  *
200d62007edSSean Christopherson  * If @only_valid is true, invalid roots are skipped.
201d62007edSSean Christopherson  *
202d62007edSSean Christopherson  * Returns NULL if the end of tdp_mmu_roots was reached.
203cfc10997SBen Gardon  */
204cfc10997SBen Gardon static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
2056103bc07SBen Gardon 					      struct kvm_mmu_page *prev_root,
206d62007edSSean Christopherson 					      bool shared, bool only_valid)
207a889ea54SBen Gardon {
208a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
209a889ea54SBen Gardon 
210c0e64238SBen Gardon 	rcu_read_lock();
211c0e64238SBen Gardon 
212cfc10997SBen Gardon 	if (prev_root)
213c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
214c0e64238SBen Gardon 						  &prev_root->link,
215c0e64238SBen Gardon 						  typeof(*prev_root), link);
216cfc10997SBen Gardon 	else
217c0e64238SBen Gardon 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
218cfc10997SBen Gardon 						   typeof(*next_root), link);
219cfc10997SBen Gardon 
22004dc4e6cSSean Christopherson 	while (next_root) {
221d62007edSSean Christopherson 		if ((!only_valid || !next_root->role.invalid) &&
222ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(next_root))
22304dc4e6cSSean Christopherson 			break;
22404dc4e6cSSean Christopherson 
225c0e64238SBen Gardon 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
226c0e64238SBen Gardon 				&next_root->link, typeof(*next_root), link);
22704dc4e6cSSean Christopherson 	}
228fb101293SBen Gardon 
229c0e64238SBen Gardon 	rcu_read_unlock();
230cfc10997SBen Gardon 
231cfc10997SBen Gardon 	if (prev_root)
2326103bc07SBen Gardon 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
233cfc10997SBen Gardon 
234a889ea54SBen Gardon 	return next_root;
235a889ea54SBen Gardon }
236a889ea54SBen Gardon 
237a889ea54SBen Gardon /*
238a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
239a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
240a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
241a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
2426103bc07SBen Gardon  *
2436103bc07SBen Gardon  * If shared is set, this function is operating under the MMU lock in read
2446103bc07SBen Gardon  * mode. In the unlikely event that this thread must free a root, the lock
2456103bc07SBen Gardon  * will be temporarily dropped and reacquired in write mode.
246a889ea54SBen Gardon  */
247d62007edSSean Christopherson #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
248d62007edSSean Christopherson 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
249cfc10997SBen Gardon 	     _root;								\
250d62007edSSean Christopherson 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
251614f6970SPaolo Bonzini 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
252614f6970SPaolo Bonzini 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
253a3f15bdaSSean Christopherson 		} else
254a889ea54SBen Gardon 
255d62007edSSean Christopherson #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
256d62007edSSean Christopherson 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
257d62007edSSean Christopherson 
258614f6970SPaolo Bonzini #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
259614f6970SPaolo Bonzini 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
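/*
 * Editor's note: an illustrative use of the yield-safe iterator (it mirrors
 * kvm_tdp_mmu_zap_leafs() later in this file; not part of the upstream
 * source):
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 *
 * Breaking out of such a loop early requires an explicit
 * kvm_tdp_mmu_put_root() on the current root, per the comment above.
 */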
260d62007edSSean Christopherson 
261226b8c8fSSean Christopherson /*
262226b8c8fSSean Christopherson  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
263226b8c8fSSean Christopherson  * the implication being that any flow that holds mmu_lock for read is
264226b8c8fSSean Christopherson  * inherently yield-friendly and should use the yield-safe variant above.
265226b8c8fSSean Christopherson  * Holding mmu_lock for write obviates the need for RCU protection as the list
266226b8c8fSSean Christopherson  * is guaranteed to be stable.
267226b8c8fSSean Christopherson  */
268a3f15bdaSSean Christopherson #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
269226b8c8fSSean Christopherson 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
270226b8c8fSSean Christopherson 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
271226b8c8fSSean Christopherson 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
272a3f15bdaSSean Christopherson 		} else
27302c00b3aSBen Gardon 
274a82070b6SDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
27502c00b3aSBen Gardon {
27602c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
27702c00b3aSBen Gardon 
27802c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
27902c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
280a82070b6SDavid Matlack 
281a82070b6SDavid Matlack 	return sp;
282a82070b6SDavid Matlack }
283a82070b6SDavid Matlack 
284c10743a1SSean Christopherson static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
285c10743a1SSean Christopherson 			    gfn_t gfn, union kvm_mmu_page_role role)
286a82070b6SDavid Matlack {
287*55c510e2SSean Christopherson 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
288428e9216SSean Christopherson 
28902c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
29002c00b3aSBen Gardon 
291a3aca4deSDavid Matlack 	sp->role = role;
29202c00b3aSBen Gardon 	sp->gfn = gfn;
293c10743a1SSean Christopherson 	sp->ptep = sptep;
29402c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
29502c00b3aSBen Gardon 
29633dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
29702c00b3aSBen Gardon }
29802c00b3aSBen Gardon 
299a82070b6SDavid Matlack static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
300a3aca4deSDavid Matlack 				  struct tdp_iter *iter)
301a3aca4deSDavid Matlack {
302a3aca4deSDavid Matlack 	struct kvm_mmu_page *parent_sp;
303a3aca4deSDavid Matlack 	union kvm_mmu_page_role role;
304a3aca4deSDavid Matlack 
305a3aca4deSDavid Matlack 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
306a3aca4deSDavid Matlack 
307a3aca4deSDavid Matlack 	role = parent_sp->role;
308a3aca4deSDavid Matlack 	role.level--;
309a3aca4deSDavid Matlack 
310c10743a1SSean Christopherson 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
311a3aca4deSDavid Matlack }
312a3aca4deSDavid Matlack 
3136e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
31402c00b3aSBen Gardon {
3157a458f0eSPaolo Bonzini 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
31602c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
31702c00b3aSBen Gardon 	struct kvm_mmu_page *root;
31802c00b3aSBen Gardon 
3196e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
32002c00b3aSBen Gardon 
32104dc4e6cSSean Christopherson 	/*
32204dc4e6cSSean Christopherson 	 * Check for an existing root before allocating a new one.  Note, the
32304dc4e6cSSean Christopherson 	 * role check prevents consuming an invalid root.
32404dc4e6cSSean Christopherson 	 */
325a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
326fb101293SBen Gardon 		if (root->role.word == role.word &&
327ad6d6b94SJinrong Liang 		    kvm_tdp_mmu_get_root(root))
3286e6ec584SSean Christopherson 			goto out;
32902c00b3aSBen Gardon 	}
33002c00b3aSBen Gardon 
331a82070b6SDavid Matlack 	root = tdp_mmu_alloc_sp(vcpu);
332c10743a1SSean Christopherson 	tdp_mmu_init_sp(root, NULL, 0, role);
333a82070b6SDavid Matlack 
33411cccf5cSBen Gardon 	refcount_set(&root->tdp_mmu_root_count, 1);
33502c00b3aSBen Gardon 
336c0e64238SBen Gardon 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
337c0e64238SBen Gardon 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
338c0e64238SBen Gardon 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
33902c00b3aSBen Gardon 
3406e6ec584SSean Christopherson out:
34102c00b3aSBen Gardon 	return __pa(root->spt);
342fe5db27dSBen Gardon }
3432f2fad08SBen Gardon 
3442f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3459a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
3469a77daacSBen Gardon 				bool shared);
3472f2fad08SBen Gardon 
348f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
349f8e14497SBen Gardon {
350f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
351f8e14497SBen Gardon 		return;
352f8e14497SBen Gardon 
353f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
35464bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
35564bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
356f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
357f8e14497SBen Gardon }
358f8e14497SBen Gardon 
359a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
360a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
361a6a0b05dSBen Gardon {
362a6a0b05dSBen Gardon 	bool pfn_changed;
363a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
364a6a0b05dSBen Gardon 
365a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
366a6a0b05dSBen Gardon 		return;
367a6a0b05dSBen Gardon 
368a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
369a6a0b05dSBen Gardon 
370a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
371a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
372a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
373fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
374a6a0b05dSBen Gardon 	}
375a6a0b05dSBen Gardon }
376a6a0b05dSBen Gardon 
37743a063caSYosry Ahmed static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
37843a063caSYosry Ahmed {
37943a063caSYosry Ahmed 	kvm_account_pgtable_pages((void *)sp->spt, +1);
38043a063caSYosry Ahmed }
38143a063caSYosry Ahmed 
38243a063caSYosry Ahmed static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
38343a063caSYosry Ahmed {
38443a063caSYosry Ahmed 	kvm_account_pgtable_pages((void *)sp->spt, -1);
38543a063caSYosry Ahmed }
38643a063caSYosry Ahmed 
3872f2fad08SBen Gardon /**
388c298a30cSDavid Matlack  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
389a9442f59SBen Gardon  *
390a9442f59SBen Gardon  * @kvm: kvm instance
391a9442f59SBen Gardon  * @sp: the page to be removed
3929a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3939a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3949a77daacSBen Gardon  *	    threads that might be adding or removing pages.
395a9442f59SBen Gardon  */
396c298a30cSDavid Matlack static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
3979a77daacSBen Gardon 			      bool shared)
398a9442f59SBen Gardon {
39943a063caSYosry Ahmed 	tdp_unaccount_mmu_page(kvm, sp);
4009a77daacSBen Gardon 	if (shared)
4019a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
4029a77daacSBen Gardon 	else
403a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
404a9442f59SBen Gardon 
405a9442f59SBen Gardon 	list_del(&sp->link);
406*55c510e2SSean Christopherson 	if (sp->nx_huge_page_disallowed)
407*55c510e2SSean Christopherson 		unaccount_nx_huge_page(kvm, sp);
4089a77daacSBen Gardon 
4099a77daacSBen Gardon 	if (shared)
4109a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
411a9442f59SBen Gardon }
412a9442f59SBen Gardon 
413a9442f59SBen Gardon /**
4140f53dfa3SDavid Matlack  * handle_removed_pt() - handle a page table removed from the TDP structure
415a066e61fSBen Gardon  *
416a066e61fSBen Gardon  * @kvm: kvm instance
417a066e61fSBen Gardon  * @pt: the page removed from the paging structure
4189a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
4199a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
4209a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
421a066e61fSBen Gardon  *
422a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
423a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
42470fb3e41SBen Gardon  *
42570fb3e41SBen Gardon  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
42670fb3e41SBen Gardon  * protection. Since this thread removed it from the paging structure,
42770fb3e41SBen Gardon  * this thread will be responsible for ensuring the page is freed. Hence the
42870fb3e41SBen Gardon  * early rcu_dereferences in the function.
429a066e61fSBen Gardon  */
4300f53dfa3SDavid Matlack static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
431a066e61fSBen Gardon {
43270fb3e41SBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
433a066e61fSBen Gardon 	int level = sp->role.level;
434e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
435a066e61fSBen Gardon 	int i;
436a066e61fSBen Gardon 
437a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
438a066e61fSBen Gardon 
439c298a30cSDavid Matlack 	tdp_mmu_unlink_sp(kvm, sp, shared);
440a066e61fSBen Gardon 
4412ca3129eSSean Christopherson 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
442ba3a6120SSean Christopherson 		tdp_ptep_t sptep = pt + i;
443574c3c55SBen Gardon 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
444ba3a6120SSean Christopherson 		u64 old_spte;
4459a77daacSBen Gardon 
4469a77daacSBen Gardon 		if (shared) {
447e25f0e0cSBen Gardon 			/*
448e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
449e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
450e25f0e0cSBen Gardon 			 * already marked as removed then another thread
451e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
452e25f0e0cSBen Gardon 			 * keep retrying the write until this thread changes the
453e25f0e0cSBen Gardon 			 * SPTE from some other value to the removed SPTE value.
454e25f0e0cSBen Gardon 			 */
455e25f0e0cSBen Gardon 			for (;;) {
456ba3a6120SSean Christopherson 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
457ba3a6120SSean Christopherson 				if (!is_removed_spte(old_spte))
458e25f0e0cSBen Gardon 					break;
459e25f0e0cSBen Gardon 				cpu_relax();
460e25f0e0cSBen Gardon 			}
4619a77daacSBen Gardon 		} else {
4628df9f1afSSean Christopherson 			/*
4638df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
4648df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
4658df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
4668df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
4678df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
4688df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
4698df9f1afSSean Christopherson 			 * unreachable.
4708df9f1afSSean Christopherson 			 */
471ba3a6120SSean Christopherson 			old_spte = kvm_tdp_mmu_read_spte(sptep);
472ba3a6120SSean Christopherson 			if (!is_shadow_present_pte(old_spte))
4738df9f1afSSean Christopherson 				continue;
474e25f0e0cSBen Gardon 
475e25f0e0cSBen Gardon 			/*
476ba3a6120SSean Christopherson 			 * Use the common helper instead of a raw WRITE_ONCE as
477ba3a6120SSean Christopherson 			 * the SPTE needs to be updated atomically if it can be
478ba3a6120SSean Christopherson 			 * modified by a different vCPU outside of mmu_lock.
479ba3a6120SSean Christopherson 			 * Even though the parent SPTE is !PRESENT, the TLB
480ba3a6120SSean Christopherson 			 * hasn't yet been flushed, and both Intel and AMD
481ba3a6120SSean Christopherson 			 * document that A/D assists can use upper-level PxE
482ba3a6120SSean Christopherson 			 * entries that are cached in the TLB, i.e. the CPU can
483ba3a6120SSean Christopherson 			 * still access the page and mark it dirty.
484ba3a6120SSean Christopherson 			 *
485ba3a6120SSean Christopherson 			 * No retry is needed in the atomic update path as the
486ba3a6120SSean Christopherson 			 * sole concern is dropping a Dirty bit, i.e. no other
487ba3a6120SSean Christopherson 			 * task can zap/remove the SPTE as mmu_lock is held for
488ba3a6120SSean Christopherson 			 * write.  Marking the SPTE as a removed SPTE is not
489ba3a6120SSean Christopherson 			 * strictly necessary for the same reason, but using
490ba3a6120SSean Christopherson 			 * the removed SPTE value keeps the shared/exclusive
491ba3a6120SSean Christopherson 			 * paths consistent and allows the handle_changed_spte()
492ba3a6120SSean Christopherson 			 * call below to hardcode the new value to REMOVED_SPTE.
493ba3a6120SSean Christopherson 			 *
494ba3a6120SSean Christopherson 			 * Note, even though dropping a Dirty bit is the only
495ba3a6120SSean Christopherson 			 * scenario where a non-atomic update could result in a
496ba3a6120SSean Christopherson 			 * functional bug, simply checking the Dirty bit isn't
497ba3a6120SSean Christopherson 			 * sufficient as a fast page fault could read the upper
498ba3a6120SSean Christopherson 			 * level SPTE before it is zapped, and then make this
499ba3a6120SSean Christopherson 			 * target SPTE writable, resume the guest, and set the
500ba3a6120SSean Christopherson 			 * Dirty bit between reading the SPTE above and writing
501ba3a6120SSean Christopherson 			 * it here.
502e25f0e0cSBen Gardon 			 */
503ba3a6120SSean Christopherson 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
504ba3a6120SSean Christopherson 							  REMOVED_SPTE, level);
5059a77daacSBen Gardon 		}
506e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
507ba3a6120SSean Christopherson 				    old_spte, REMOVED_SPTE, level, shared);
508a066e61fSBen Gardon 	}
509a066e61fSBen Gardon 
5107cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
511a066e61fSBen Gardon }
512a066e61fSBen Gardon 
513a066e61fSBen Gardon /**
5147f6231a3SKai Huang  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
5152f2fad08SBen Gardon  * @kvm: kvm instance
5162f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
5172f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
5182f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
5192f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
5202f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
5219a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
5229a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
5239a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
5242f2fad08SBen Gardon  *
5252f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
5262f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
5272f2fad08SBen Gardon  */
5282f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
5299a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
5309a77daacSBen Gardon 				  bool shared)
5312f2fad08SBen Gardon {
5322f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
5332f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
5342f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
5352f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
5362f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
5372f2fad08SBen Gardon 
5382f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
5392f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
540764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
5412f2fad08SBen Gardon 
5422f2fad08SBen Gardon 	/*
5432f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
5442f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
5452f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
546d9f6e12fSIngo Molnar 	 * present leaf SPTE pointing to a different PFN. A notifier handler
5472f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
5482f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
5492f2fad08SBen Gardon 	 * thread before replacement.
5502f2fad08SBen Gardon 	 */
5512f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
5522f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
5532f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
5542f2fad08SBen Gardon 		       "different PFN!\n"
5552f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
5562f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
5572f2fad08SBen Gardon 
5582f2fad08SBen Gardon 		/*
5592f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
560d9f6e12fSIngo Molnar 		 * corruption.
5612f2fad08SBen Gardon 		 */
5622f2fad08SBen Gardon 		BUG();
5632f2fad08SBen Gardon 	}
5642f2fad08SBen Gardon 
5652f2fad08SBen Gardon 	if (old_spte == new_spte)
5662f2fad08SBen Gardon 		return;
5672f2fad08SBen Gardon 
568b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
569b9a98c34SBen Gardon 
570115111efSDavid Matlack 	if (is_leaf)
571115111efSDavid Matlack 		check_spte_writable_invariants(new_spte);
572115111efSDavid Matlack 
5732f2fad08SBen Gardon 	/*
5742f2fad08SBen Gardon 	 * The only times a SPTE should be changed from a non-present to
5752f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
5762f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
5772f2fad08SBen Gardon 	 */
5782f2fad08SBen Gardon 	if (!was_present && !is_present) {
5792f2fad08SBen Gardon 		/*
58008f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
58108f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
58208f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
58308f07c80SBen Gardon 		 * are nonpresent.
5842f2fad08SBen Gardon 		 */
58508f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
58608f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
58708f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
5882f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
5892f2fad08SBen Gardon 			       "should not be replaced with another,\n"
5902f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
59108f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
59208f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
5932f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
5942f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
5952f2fad08SBen Gardon 		return;
5962f2fad08SBen Gardon 	}
5972f2fad08SBen Gardon 
59871f51d2cSMingwei Zhang 	if (is_leaf != was_leaf)
59971f51d2cSMingwei Zhang 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
6002f2fad08SBen Gardon 
6012f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
60264bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
6032f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
6042f2fad08SBen Gardon 
6052f2fad08SBen Gardon 	/*
6062f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
607c8e5a0d0SSean Christopherson 	 * the paging structure.  Note the WARN on the PFN changing without the
608c8e5a0d0SSean Christopherson 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
609c8e5a0d0SSean Christopherson 	 * pages are kernel allocations and should never be migrated.
6102f2fad08SBen Gardon 	 */
611c8e5a0d0SSean Christopherson 	if (was_present && !was_leaf &&
612c8e5a0d0SSean Christopherson 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
6130f53dfa3SDavid Matlack 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
6142f2fad08SBen Gardon }
6152f2fad08SBen Gardon 
6162f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
6179a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
6189a77daacSBen Gardon 				bool shared)
6192f2fad08SBen Gardon {
6209a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
6219a77daacSBen Gardon 			      shared);
622f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
623a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
624a6a0b05dSBen Gardon 				      new_spte, level);
6252f2fad08SBen Gardon }
626faaf05b0SBen Gardon 
627fe43fa2fSBen Gardon /*
6286ccf4438SPaolo Bonzini  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
6296ccf4438SPaolo Bonzini  * and handle the associated bookkeeping.  Do not mark the page dirty
63024ae4cfaSBen Gardon  * in KVM's dirty bitmaps.
6319a77daacSBen Gardon  *
6323255530aSDavid Matlack  * If setting the SPTE fails because it has changed, iter->old_spte will be
6333255530aSDavid Matlack  * refreshed to the current value of the spte.
6343255530aSDavid Matlack  *
6359a77daacSBen Gardon  * @kvm: kvm instance
6369a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
6379a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
6383e72c791SDavid Matlack  * Return:
6393e72c791SDavid Matlack  * * 0      - If the SPTE was set.
6403e72c791SDavid Matlack  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
6413e72c791SDavid Matlack  *            no side-effects other than setting iter->old_spte to the last
6423e72c791SDavid Matlack  *            known value of the spte.
6439a77daacSBen Gardon  */
6443e72c791SDavid Matlack static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
6459a77daacSBen Gardon 					  struct tdp_iter *iter,
6469a77daacSBen Gardon 					  u64 new_spte)
6479a77daacSBen Gardon {
6483255530aSDavid Matlack 	u64 *sptep = rcu_dereference(iter->sptep);
6493255530aSDavid Matlack 
650396fd74dSSean Christopherson 	/*
651396fd74dSSean Christopherson 	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
652396fd74dSSean Christopherson 	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
653396fd74dSSean Christopherson 	 * and pre-checking before inserting a new SPTE is advantageous as it
654396fd74dSSean Christopherson 	 * avoids unnecessary work.
655396fd74dSSean Christopherson 	 */
656396fd74dSSean Christopherson 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
6573a0f64deSSean Christopherson 
6589a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
6599a77daacSBen Gardon 
66008f07c80SBen Gardon 	/*
6616e8eb206SDavid Matlack 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
6626e8eb206SDavid Matlack 	 * does not hold the mmu_lock.
6636e8eb206SDavid Matlack 	 */
664aee98a68SUros Bizjak 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
6653e72c791SDavid Matlack 		return -EBUSY;
6669a77daacSBen Gardon 
66724ae4cfaSBen Gardon 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
66808889894SSean Christopherson 			      new_spte, iter->level, true);
66924ae4cfaSBen Gardon 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
6709a77daacSBen Gardon 
6713e72c791SDavid Matlack 	return 0;
6729a77daacSBen Gardon }
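/*
 * Editor's note: illustrative retry pattern for callers holding mmu_lock for
 * read (it mirrors __tdp_mmu_zap_root() later in this file; not part of the
 * upstream source):
 *
 *	retry:
 *		...
 *		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
 *			goto retry;
 *
 * On -EBUSY, iter->old_spte already holds the refreshed value, so the retry
 * re-evaluates the same SPTE without restarting the walk.
 */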
6739a77daacSBen Gardon 
6743e72c791SDavid Matlack static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
67508f07c80SBen Gardon 					  struct tdp_iter *iter)
67608f07c80SBen Gardon {
6773e72c791SDavid Matlack 	int ret;
6783e72c791SDavid Matlack 
67908f07c80SBen Gardon 	/*
68008f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
68108f07c80SBen Gardon 	 * non-present value. This will stop other threads from
68208f07c80SBen Gardon 	 * immediately installing a present entry in its place
68308f07c80SBen Gardon 	 * before the TLBs are flushed.
68408f07c80SBen Gardon 	 */
6853e72c791SDavid Matlack 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
6863e72c791SDavid Matlack 	if (ret)
6873e72c791SDavid Matlack 		return ret;
68808f07c80SBen Gardon 
68908f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
69008f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
69108f07c80SBen Gardon 
69208f07c80SBen Gardon 	/*
693ba3a6120SSean Christopherson 	 * No other thread can overwrite the removed SPTE as they must either
694ba3a6120SSean Christopherson 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
695ba3a6120SSean Christopherson 	 * overwrite the special removed SPTE value. No bookkeeping is needed
696ba3a6120SSean Christopherson 	 * here since the SPTE is going from non-present to non-present.  Use
697ba3a6120SSean Christopherson 	 * the raw write helper to avoid an unnecessary check on volatile bits.
69808f07c80SBen Gardon 	 */
699ba3a6120SSean Christopherson 	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
70008f07c80SBen Gardon 
7013e72c791SDavid Matlack 	return 0;
70208f07c80SBen Gardon }
70308f07c80SBen Gardon 
7049a77daacSBen Gardon 
7059a77daacSBen Gardon /*
706fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
707626808d1SSean Christopherson  * @kvm:	      KVM instance
708626808d1SSean Christopherson  * @as_id:	      Address space ID, i.e. regular vs. SMM
709626808d1SSean Christopherson  * @sptep:	      Pointer to the SPTE
710626808d1SSean Christopherson  * @old_spte:	      The current value of the SPTE
711626808d1SSean Christopherson  * @new_spte:	      The new value that will be set for the SPTE
712626808d1SSean Christopherson  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
713626808d1SSean Christopherson  * @level:	      The level _containing_ the SPTE (its parent PT's level)
714fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
715fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
716fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
717fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
718fe43fa2fSBen Gardon  *		      double counted.
719fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
720fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
721fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
722fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
723fe43fa2fSBen Gardon  *		      writes from being double counted.
724ba3a6120SSean Christopherson  *
725ba3a6120SSean Christopherson  * Returns the old SPTE value, which _may_ be different than @old_spte if the
726ba3a6120SSean Christopherson  * SPTE had volatile bits.
727fe43fa2fSBen Gardon  */
728ba3a6120SSean Christopherson static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
729626808d1SSean Christopherson 			      u64 old_spte, u64 new_spte, gfn_t gfn, int level,
730626808d1SSean Christopherson 			      bool record_acc_track, bool record_dirty_log)
731faaf05b0SBen Gardon {
732531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
7333a9a4aa5SBen Gardon 
73408f07c80SBen Gardon 	/*
735966da62aSSean Christopherson 	 * No thread should be using this function to set SPTEs to or from the
73608f07c80SBen Gardon 	 * temporary removed SPTE value.
73708f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
73808f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
73908f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
74008f07c80SBen Gardon 	 */
741626808d1SSean Christopherson 	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
74208f07c80SBen Gardon 
743ba3a6120SSean Christopherson 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
744faaf05b0SBen Gardon 
745626808d1SSean Christopherson 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
746626808d1SSean Christopherson 
747f8e14497SBen Gardon 	if (record_acc_track)
748626808d1SSean Christopherson 		handle_changed_spte_acc_track(old_spte, new_spte, level);
749a6a0b05dSBen Gardon 	if (record_dirty_log)
750626808d1SSean Christopherson 		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
751626808d1SSean Christopherson 					      new_spte, level);
752ba3a6120SSean Christopherson 	return old_spte;
753626808d1SSean Christopherson }
754626808d1SSean Christopherson 
755626808d1SSean Christopherson static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
756626808d1SSean Christopherson 				     u64 new_spte, bool record_acc_track,
757626808d1SSean Christopherson 				     bool record_dirty_log)
758626808d1SSean Christopherson {
759626808d1SSean Christopherson 	WARN_ON_ONCE(iter->yielded);
760626808d1SSean Christopherson 
761ba3a6120SSean Christopherson 	iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
762ba3a6120SSean Christopherson 					    iter->old_spte, new_spte,
763ba3a6120SSean Christopherson 					    iter->gfn, iter->level,
764626808d1SSean Christopherson 					    record_acc_track, record_dirty_log);
765f8e14497SBen Gardon }
766f8e14497SBen Gardon 
767f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
768f8e14497SBen Gardon 				    u64 new_spte)
769f8e14497SBen Gardon {
770626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
771f8e14497SBen Gardon }
772f8e14497SBen Gardon 
773f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
774f8e14497SBen Gardon 						 struct tdp_iter *iter,
775f8e14497SBen Gardon 						 u64 new_spte)
776f8e14497SBen Gardon {
777626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
778a6a0b05dSBen Gardon }
779a6a0b05dSBen Gardon 
780a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
781a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
782a6a0b05dSBen Gardon 						 u64 new_spte)
783a6a0b05dSBen Gardon {
784626808d1SSean Christopherson 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
785faaf05b0SBen Gardon }
786faaf05b0SBen Gardon 
787faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
78877aa6075SDavid Matlack 	for_each_tdp_pte(_iter, _root, _start, _end)
789faaf05b0SBen Gardon 
790f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
791f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
792f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
793f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
794f8e14497SBen Gardon 			continue;					\
795f8e14497SBen Gardon 		else
796f8e14497SBen Gardon 
797bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
798b9e5603cSPaolo Bonzini 	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
799bb18842eSBen Gardon 
800faaf05b0SBen Gardon /*
801e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
802e28a436cSBen Gardon  * to the scheduler.
803e28a436cSBen Gardon  *
804e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
805e139a34eSBen Gardon  * TLB flush before yielding.
806e139a34eSBen Gardon  *
8073a0f64deSSean Christopherson  * If this function yields, iter->yielded is set and the caller must skip to
8083a0f64deSSean Christopherson  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
8093a0f64deSSean Christopherson  * over the paging structures to allow the iterator to continue its traversal
8103a0f64deSSean Christopherson  * from the paging structure root.
811e28a436cSBen Gardon  *
8123a0f64deSSean Christopherson  * Returns true if this function yielded.
813e28a436cSBen Gardon  */
8143a0f64deSSean Christopherson static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
8153a0f64deSSean Christopherson 							  struct tdp_iter *iter,
8163a0f64deSSean Christopherson 							  bool flush, bool shared)
817a6a0b05dSBen Gardon {
8183a0f64deSSean Christopherson 	WARN_ON(iter->yielded);
8193a0f64deSSean Christopherson 
820ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
821ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
822ed5e484bSBen Gardon 		return false;
823ed5e484bSBen Gardon 
824531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
825e139a34eSBen Gardon 		if (flush)
826e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
827e139a34eSBen Gardon 
828bd296779SSean Christopherson 		rcu_read_unlock();
829bd296779SSean Christopherson 
8306103bc07SBen Gardon 		if (shared)
8316103bc07SBen Gardon 			cond_resched_rwlock_read(&kvm->mmu_lock);
8326103bc07SBen Gardon 		else
833531810caSBen Gardon 			cond_resched_rwlock_write(&kvm->mmu_lock);
8346103bc07SBen Gardon 
8357cca2d0bSBen Gardon 		rcu_read_lock();
836ed5e484bSBen Gardon 
837ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
838ed5e484bSBen Gardon 
8393a0f64deSSean Christopherson 		iter->yielded = true;
840a6a0b05dSBen Gardon 	}
841e28a436cSBen Gardon 
8423a0f64deSSean Christopherson 	return iter->yielded;
843a6a0b05dSBen Gardon }
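/*
 * Editor's note: illustrative caller pattern (it mirrors tdp_mmu_zap_leafs()
 * below; not part of the upstream source):
 *
 *	if (can_yield &&
 *	    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 *		flush = false;
 *		continue;
 *	}
 *
 * A yield performs any pending TLB flush, so the caller clears its local
 * flush state and skips to the next iteration, where tdp_iter_next() restarts
 * the walk from the paging-structure root.
 */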
844a6a0b05dSBen Gardon 
84586931ff7SSean Christopherson static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
846e2b5b21dSSean Christopherson {
847e2b5b21dSSean Christopherson 	/*
84886931ff7SSean Christopherson 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
84986931ff7SSean Christopherson 	 * a gpa range that would exceed the max gfn, and KVM does not create
85086931ff7SSean Christopherson 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
85186931ff7SSean Christopherson 	 * the slow emulation path every time.
852e2b5b21dSSean Christopherson 	 */
85386931ff7SSean Christopherson 	return kvm_mmu_max_gfn() + 1;
854e2b5b21dSSean Christopherson }
855e2b5b21dSSean Christopherson 
8561b6043e8SSean Christopherson static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
8571b6043e8SSean Christopherson 			       bool shared, int zap_level)
858e2b5b21dSSean Christopherson {
859e2b5b21dSSean Christopherson 	struct tdp_iter iter;
860e2b5b21dSSean Christopherson 
86186931ff7SSean Christopherson 	gfn_t end = tdp_mmu_max_gfn_exclusive();
862e2b5b21dSSean Christopherson 	gfn_t start = 0;
863e2b5b21dSSean Christopherson 
8641b6043e8SSean Christopherson 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
8651b6043e8SSean Christopherson retry:
8661b6043e8SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
8671b6043e8SSean Christopherson 			continue;
8681b6043e8SSean Christopherson 
8691b6043e8SSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte))
8701b6043e8SSean Christopherson 			continue;
8711b6043e8SSean Christopherson 
8721b6043e8SSean Christopherson 		if (iter.level > zap_level)
8731b6043e8SSean Christopherson 			continue;
8741b6043e8SSean Christopherson 
8751b6043e8SSean Christopherson 		if (!shared)
8761b6043e8SSean Christopherson 			tdp_mmu_set_spte(kvm, &iter, 0);
8771b6043e8SSean Christopherson 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
8781b6043e8SSean Christopherson 			goto retry;
8791b6043e8SSean Christopherson 	}
8801b6043e8SSean Christopherson }
8811b6043e8SSean Christopherson 
8821b6043e8SSean Christopherson static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
8831b6043e8SSean Christopherson 			     bool shared)
8841b6043e8SSean Christopherson {
8851b6043e8SSean Christopherson 
8868351779cSPaolo Bonzini 	/*
8878351779cSPaolo Bonzini 	 * The root must have an elevated refcount so that it's reachable via
8888351779cSPaolo Bonzini 	 * mmu_notifier callbacks, which allows this path to yield and drop
8898351779cSPaolo Bonzini 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
8908351779cSPaolo Bonzini 	 * must drop all references to relevant pages prior to completing the
8918351779cSPaolo Bonzini 	 * callback.  Dropping mmu_lock with an unreachable root would result
8928351779cSPaolo Bonzini 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
8938351779cSPaolo Bonzini 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
8948351779cSPaolo Bonzini 	 * dirty accessed bits to the SPTE's associated struct page.
8958351779cSPaolo Bonzini 	 */
8968351779cSPaolo Bonzini 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
8978351779cSPaolo Bonzini 
898e2b5b21dSSean Christopherson 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
899e2b5b21dSSean Christopherson 
900e2b5b21dSSean Christopherson 	rcu_read_lock();
901e2b5b21dSSean Christopherson 
902e2b5b21dSSean Christopherson 	/*
9031b6043e8SSean Christopherson 	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
9041b6043e8SSean Christopherson 	 * split the zap into two passes.  On the first pass, zap at the 1gb
9051b6043e8SSean Christopherson 	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
9061b6043e8SSean Christopherson 	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
9071b6043e8SSean Christopherson 	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
9081b6043e8SSean Christopherson 	 *
9091b6043e8SSean Christopherson 	 * Because zapping a SP recurses on its children, stepping down to
9101b6043e8SSean Christopherson 	 * PG_LEVEL_4K in the iterator itself is unnecessary.
911e2b5b21dSSean Christopherson 	 */
9121b6043e8SSean Christopherson 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
9131b6043e8SSean Christopherson 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
914e2b5b21dSSean Christopherson 
915e2b5b21dSSean Christopherson 	rcu_read_unlock();
916e2b5b21dSSean Christopherson }
917e2b5b21dSSean Christopherson 
918c10743a1SSean Christopherson bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
919c10743a1SSean Christopherson {
920c10743a1SSean Christopherson 	u64 old_spte;
921c10743a1SSean Christopherson 
922c10743a1SSean Christopherson 	/*
923c10743a1SSean Christopherson 	 * This helper intentionally doesn't allow zapping a root shadow page,
924c10743a1SSean Christopherson 	 * which doesn't have a parent page table and thus no associated entry.
925c10743a1SSean Christopherson 	 */
926c10743a1SSean Christopherson 	if (WARN_ON_ONCE(!sp->ptep))
927c10743a1SSean Christopherson 		return false;
928c10743a1SSean Christopherson 
929c10743a1SSean Christopherson 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
930bb95dfb9SSean Christopherson 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
931c10743a1SSean Christopherson 		return false;
932c10743a1SSean Christopherson 
933c10743a1SSean Christopherson 	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
934c10743a1SSean Christopherson 			   sp->gfn, sp->role.level + 1, true, true);
935c10743a1SSean Christopherson 
936c10743a1SSean Christopherson 	return true;
937c10743a1SSean Christopherson }
938c10743a1SSean Christopherson 
939faaf05b0SBen Gardon /*
940063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
941063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
942063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
943063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
9446103bc07SBen Gardon  * operation can cause a soft lockup.
945faaf05b0SBen Gardon  */
946f47e5bbbSSean Christopherson static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
947acbda82aSSean Christopherson 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
948faaf05b0SBen Gardon {
949faaf05b0SBen Gardon 	struct tdp_iter iter;
950faaf05b0SBen Gardon 
95186931ff7SSean Christopherson 	end = min(end, tdp_mmu_max_gfn_exclusive());
952524a1e4eSSean Christopherson 
953acbda82aSSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
9546103bc07SBen Gardon 
9557cca2d0bSBen Gardon 	rcu_read_lock();
9567cca2d0bSBen Gardon 
957f47e5bbbSSean Christopherson 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
9581af4a960SBen Gardon 		if (can_yield &&
959acbda82aSSean Christopherson 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
960a835429cSSean Christopherson 			flush = false;
9611af4a960SBen Gardon 			continue;
9621af4a960SBen Gardon 		}
9631af4a960SBen Gardon 
964f47e5bbbSSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte) ||
965faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
966faaf05b0SBen Gardon 			continue;
967faaf05b0SBen Gardon 
968faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
969a835429cSSean Christopherson 		flush = true;
970faaf05b0SBen Gardon 	}
9717cca2d0bSBen Gardon 
9727cca2d0bSBen Gardon 	rcu_read_unlock();
973bb95dfb9SSean Christopherson 
974f47e5bbbSSean Christopherson 	/*
975f47e5bbbSSean Christopherson 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
976f47e5bbbSSean Christopherson 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
977f47e5bbbSSean Christopherson 	 */
978f47e5bbbSSean Christopherson 	return flush;
979faaf05b0SBen Gardon }
980faaf05b0SBen Gardon 
981faaf05b0SBen Gardon /*
9827edc3a68SKai Huang  * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
9837edc3a68SKai Huang  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
9847edc3a68SKai Huang  * more SPTEs were zapped since the MMU lock was last acquired.
985faaf05b0SBen Gardon  */
986f47e5bbbSSean Christopherson bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
987f47e5bbbSSean Christopherson 			   bool can_yield, bool flush)
988faaf05b0SBen Gardon {
989faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
990faaf05b0SBen Gardon 
991614f6970SPaolo Bonzini 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
992f47e5bbbSSean Christopherson 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
993faaf05b0SBen Gardon 
994faaf05b0SBen Gardon 	return flush;
995faaf05b0SBen Gardon }
996faaf05b0SBen Gardon 
997faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
998faaf05b0SBen Gardon {
999e2b5b21dSSean Christopherson 	struct kvm_mmu_page *root;
10002b9663d8SSean Christopherson 	int i;
1001faaf05b0SBen Gardon 
100277c8cd6bSSean Christopherson 	/*
100322b94c4bSPaolo Bonzini 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
100422b94c4bSPaolo Bonzini 	 * before returning to the caller.  Zap directly even if the root is
100522b94c4bSPaolo Bonzini 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
100622b94c4bSPaolo Bonzini 	 * all that expensive and mmu_lock is already held, which means the
100722b94c4bSPaolo Bonzini 	 * worker has yielded, i.e. flushing the work instead of zapping here
100822b94c4bSPaolo Bonzini 	 * isn't guaranteed to be any faster.
100922b94c4bSPaolo Bonzini 	 *
101177c8cd6bSSean Christopherson 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
101177c8cd6bSSean Christopherson 	 * is being destroyed or the userspace VMM has exited.  In both cases,
101277c8cd6bSSean Christopherson 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
101377c8cd6bSSean Christopherson 	 */
1014e2b5b21dSSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1015e2b5b21dSSean Christopherson 		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
1016e2b5b21dSSean Christopherson 			tdp_mmu_zap_root(kvm, root, false);
1017e2b5b21dSSean Christopherson 	}
1018faaf05b0SBen Gardon }
1019bb18842eSBen Gardon 
10204c6654bdSBen Gardon /*
1021f28e9c7fSSean Christopherson  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
102222b94c4bSPaolo Bonzini  * zap" completes.
10234c6654bdSBen Gardon  */
10244c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
10254c6654bdSBen Gardon {
102622b94c4bSPaolo Bonzini 	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
10274c6654bdSBen Gardon }
10284c6654bdSBen Gardon 
1029bb18842eSBen Gardon /*
1030f28e9c7fSSean Christopherson  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
103122b94c4bSPaolo Bonzini  * is about to be zapped, e.g. in response to a memslots update.  The actual
103222b94c4bSPaolo Bonzini  * zapping is performed asynchronously, so a reference is taken on all roots.
103322b94c4bSPaolo Bonzini  * Using a separate workqueue makes it easy to ensure that the destruction is
103422b94c4bSPaolo Bonzini  * performed before the "fast zap" completes, without keeping a separate list
103522b94c4bSPaolo Bonzini  * of invalidated roots; the list is effectively the list of work items in
103622b94c4bSPaolo Bonzini  * the workqueue.
1037b7cccd39SBen Gardon  *
103822b94c4bSPaolo Bonzini  * Get a reference on each root that is not already invalid; the asynchronous
103922b94c4bSPaolo Bonzini  * worker assumes it was gifted a reference to the root it processes.  Because
104022b94c4bSPaolo Bonzini  * mmu_lock is held for write, it should be impossible to observe a root with
104122b94c4bSPaolo Bonzini  * zero refcount, i.e. the list of roots cannot be stale.
10424c6654bdSBen Gardon  *
1043b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
1044b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
1045b7cccd39SBen Gardon  */
1046b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
1047b7cccd39SBen Gardon {
1048b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
1049b7cccd39SBen Gardon 
1050b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1051f28e9c7fSSean Christopherson 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1052efd995daSPaolo Bonzini 		if (!root->role.invalid &&
1053efd995daSPaolo Bonzini 		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
1054b7cccd39SBen Gardon 			root->role.invalid = true;
105522b94c4bSPaolo Bonzini 			tdp_mmu_schedule_zap_root(kvm, root);
105622b94c4bSPaolo Bonzini 		}
1057b7cccd39SBen Gardon 	}
1058f28e9c7fSSean Christopherson }
1059b7cccd39SBen Gardon 
1060bb18842eSBen Gardon /*
1061bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
1062bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
1063bb18842eSBen Gardon  */
1064cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1065cdc47767SPaolo Bonzini 					  struct kvm_page_fault *fault,
1066cdc47767SPaolo Bonzini 					  struct tdp_iter *iter)
1067bb18842eSBen Gardon {
1068c435d4b7SSean Christopherson 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1069bb18842eSBen Gardon 	u64 new_spte;
107057a3e96dSKai Huang 	int ret = RET_PF_FIXED;
1071ad67e480SPaolo Bonzini 	bool wrprot = false;
1072bb18842eSBen Gardon 
10737158bee4SPaolo Bonzini 	WARN_ON(sp->role.level != fault->goal_level);
1074e710c5f6SDavid Matlack 	if (unlikely(!fault->slot))
1075bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
10769a77daacSBen Gardon 	else
107753597858SDavid Matlack 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
10782839180cSPaolo Bonzini 					 fault->pfn, iter->old_spte, fault->prefetch, true,
10797158bee4SPaolo Bonzini 					 fault->map_writable, &new_spte);
1080bb18842eSBen Gardon 
1081bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
1082bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
10833e72c791SDavid Matlack 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
10849a77daacSBen Gardon 		return RET_PF_RETRY;
1085bb95dfb9SSean Christopherson 	else if (is_shadow_present_pte(iter->old_spte) &&
1086bb95dfb9SSean Christopherson 		 !is_last_spte(iter->old_spte, iter->level))
1087bb95dfb9SSean Christopherson 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1088bb95dfb9SSean Christopherson 						   KVM_PAGES_PER_HPAGE(iter->level + 1));
1089bb18842eSBen Gardon 
1090bb18842eSBen Gardon 	/*
1091bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
1092bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
1093bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
1094bb18842eSBen Gardon 	 */
1095ad67e480SPaolo Bonzini 	if (wrprot) {
1096cdc47767SPaolo Bonzini 		if (fault->write)
1097bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
1098bb18842eSBen Gardon 	}
1099bb18842eSBen Gardon 
1100bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
11019a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
11021075d41eSSean Christopherson 		vcpu->stat.pf_mmio_spte_created++;
11039a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
11049a77daacSBen Gardon 				     new_spte);
1105bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
11063849e092SSean Christopherson 	} else {
11079a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
11089a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
11093849e092SSean Christopherson 	}
1110bb18842eSBen Gardon 
1111bb18842eSBen Gardon 	return ret;
1112bb18842eSBen Gardon }
1113bb18842eSBen Gardon 
1114bb18842eSBen Gardon /*
1115cb00a70bSDavid Matlack  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1116cb00a70bSDavid Matlack  * provided page table.
11177b7e1ab6SDavid Matlack  *
11187b7e1ab6SDavid Matlack  * @kvm: kvm instance
11197b7e1ab6SDavid Matlack  * @iter: a tdp_iter instance currently on the SPTE that should be set
11207b7e1ab6SDavid Matlack  * @sp: The new TDP page table to install.
11217b7e1ab6SDavid Matlack  * @account_nx: True if this page table is being installed to split a
11227b7e1ab6SDavid Matlack  *              non-executable huge page.
1123cb00a70bSDavid Matlack  * @shared: This operation is running under the MMU lock in read mode.
11247b7e1ab6SDavid Matlack  *
11257b7e1ab6SDavid Matlack  * Returns: 0 if the new page table was installed. Non-0 if the page table
11267b7e1ab6SDavid Matlack  *          could not be installed (e.g. the atomic compare-exchange failed).
11277b7e1ab6SDavid Matlack  */
1128cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1129cb00a70bSDavid Matlack 			   struct kvm_mmu_page *sp, bool account_nx,
1130cb00a70bSDavid Matlack 			   bool shared)
11317b7e1ab6SDavid Matlack {
113254275f74SSean Christopherson 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1133cb00a70bSDavid Matlack 	int ret = 0;
11347b7e1ab6SDavid Matlack 
1135cb00a70bSDavid Matlack 	if (shared) {
11367b7e1ab6SDavid Matlack 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
11377b7e1ab6SDavid Matlack 		if (ret)
11387b7e1ab6SDavid Matlack 			return ret;
1139cb00a70bSDavid Matlack 	} else {
1140cb00a70bSDavid Matlack 		tdp_mmu_set_spte(kvm, iter, spte);
1141cb00a70bSDavid Matlack 	}
11427b7e1ab6SDavid Matlack 
11437b7e1ab6SDavid Matlack 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
11447b7e1ab6SDavid Matlack 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
11457b7e1ab6SDavid Matlack 	if (account_nx)
1146*55c510e2SSean Christopherson 		account_nx_huge_page(kvm, sp, true);
11477b7e1ab6SDavid Matlack 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
114843a063caSYosry Ahmed 	tdp_account_mmu_page(kvm, sp);
11497b7e1ab6SDavid Matlack 
11507b7e1ab6SDavid Matlack 	return 0;
11517b7e1ab6SDavid Matlack }
11527b7e1ab6SDavid Matlack 
11537b7e1ab6SDavid Matlack /*
1154bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1155bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
1156bb18842eSBen Gardon  */
11572f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1158bb18842eSBen Gardon {
1159bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1160bb18842eSBen Gardon 	struct tdp_iter iter;
116189c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
1162bb18842eSBen Gardon 	int ret;
1163bb18842eSBen Gardon 
116473a3c659SPaolo Bonzini 	kvm_mmu_hugepage_adjust(vcpu, fault);
1165bb18842eSBen Gardon 
1166f0066d94SPaolo Bonzini 	trace_kvm_mmu_spte_requested(fault);
11677cca2d0bSBen Gardon 
11687cca2d0bSBen Gardon 	rcu_read_lock();
11697cca2d0bSBen Gardon 
11702f6305ddSPaolo Bonzini 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
117173a3c659SPaolo Bonzini 		if (fault->nx_huge_page_workaround_enabled)
1172536f0e6aSPaolo Bonzini 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1173bb18842eSBen Gardon 
117473a3c659SPaolo Bonzini 		if (iter.level == fault->goal_level)
1175bb18842eSBen Gardon 			break;
1176bb18842eSBen Gardon 
1177bb18842eSBen Gardon 		/*
1178bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
1179bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
1180bb18842eSBen Gardon 		 * with a non-leaf SPTE.
1181bb18842eSBen Gardon 		 */
1182bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
1183bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
11843e72c791SDavid Matlack 			if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
11859a77daacSBen Gardon 				break;
1186bb18842eSBen Gardon 
1187bb18842eSBen Gardon 			/*
1188bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
1189bb18842eSBen Gardon 			 * because the new value informs the !present
1190bb18842eSBen Gardon 			 * path below.
1191bb18842eSBen Gardon 			 */
11920e587aa7SSean Christopherson 			iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
1193bb18842eSBen Gardon 		}
1194bb18842eSBen Gardon 
1195bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
11967b7e1ab6SDavid Matlack 			bool account_nx = fault->huge_page_disallowed &&
11977b7e1ab6SDavid Matlack 					  fault->req_level >= iter.level;
11987b7e1ab6SDavid Matlack 
1199ff76d506SKai Huang 			/*
1200c4342633SIngo Molnar 			 * If the SPTE has been frozen by another thread, just
1201ff76d506SKai Huang 			 * give up and retry, avoiding unnecessary page table
1202ff76d506SKai Huang 			 * allocation and freeing.
1203ff76d506SKai Huang 			 */
1204ff76d506SKai Huang 			if (is_removed_spte(iter.old_spte))
1205ff76d506SKai Huang 				break;
1206ff76d506SKai Huang 
1207a82070b6SDavid Matlack 			sp = tdp_mmu_alloc_sp(vcpu);
1208a82070b6SDavid Matlack 			tdp_mmu_init_child_sp(sp, &iter);
1209a82070b6SDavid Matlack 
1210cb00a70bSDavid Matlack 			if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
12119a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
12129a77daacSBen Gardon 				break;
12139a77daacSBen Gardon 			}
1214bb18842eSBen Gardon 		}
1215bb18842eSBen Gardon 	}
1216bb18842eSBen Gardon 
121758298b06SSean Christopherson 	/*
121858298b06SSean Christopherson 	 * Force the guest to retry the access if the upper level SPTEs aren't
121958298b06SSean Christopherson 	 * in place, or if the target leaf SPTE is frozen by another CPU.
122058298b06SSean Christopherson 	 */
122158298b06SSean Christopherson 	if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) {
12227cca2d0bSBen Gardon 		rcu_read_unlock();
1223bb18842eSBen Gardon 		return RET_PF_RETRY;
12247cca2d0bSBen Gardon 	}
1225bb18842eSBen Gardon 
1226cdc47767SPaolo Bonzini 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
12277cca2d0bSBen Gardon 	rcu_read_unlock();
1228bb18842eSBen Gardon 
1229bb18842eSBen Gardon 	return ret;
1230bb18842eSBen Gardon }
1231063afacdSBen Gardon 
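/*
 * Zap leaf SPTEs for the GFN range of an MMU notifier event.  Returns true
 * if a TLB flush is needed before releasing the MMU lock.
 */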
12323039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
12333039bcc7SSean Christopherson 				 bool flush)
1234063afacdSBen Gardon {
1235f47e5bbbSSean Christopherson 	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
123683b83a02SSean Christopherson 				     range->end, range->may_block, flush);
12373039bcc7SSean Christopherson }
12383039bcc7SSean Christopherson 
12393039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
12403039bcc7SSean Christopherson 			      struct kvm_gfn_range *range);
12413039bcc7SSean Christopherson 
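/*
 * Invoke @handler on each leaf SPTE in the notifier range, for every root in
 * the range's address space.  Returns true if @handler returned true for any
 * SPTE.
 */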
12423039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
12433039bcc7SSean Christopherson 						   struct kvm_gfn_range *range,
1244c1b91493SSean Christopherson 						   tdp_handler_t handler)
1245063afacdSBen Gardon {
1246063afacdSBen Gardon 	struct kvm_mmu_page *root;
12473039bcc7SSean Christopherson 	struct tdp_iter iter;
12483039bcc7SSean Christopherson 	bool ret = false;
1249063afacdSBen Gardon 
1250063afacdSBen Gardon 	/*
1251e1eed584SSean Christopherson 	 * Don't support rescheduling, none of the MMU notifiers that funnel
1252e1eed584SSean Christopherson 	 * into this helper allow blocking; it'd be dead, wasteful code.
1253063afacdSBen Gardon 	 */
12543039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1255a151acecSSean Christopherson 		rcu_read_lock();
1256a151acecSSean Christopherson 
12573039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
12583039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
1259063afacdSBen Gardon 
12603039bcc7SSean Christopherson 		rcu_read_unlock();
1261a151acecSSean Christopherson 	}
1262063afacdSBen Gardon 
1263063afacdSBen Gardon 	return ret;
1264063afacdSBen Gardon }
1265063afacdSBen Gardon 
1266f8e14497SBen Gardon /*
1267f8e14497SBen Gardon  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
1268f8e14497SBen Gardon  * if any of the GFNs in the range have been accessed.
1269f8e14497SBen Gardon  */
12703039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
12713039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1272f8e14497SBen Gardon {
1273f8e14497SBen Gardon 	u64 new_spte = 0;
1274f8e14497SBen Gardon 
12753039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
12763039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
12773039bcc7SSean Christopherson 		return false;
12787cca2d0bSBen Gardon 
12793039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1280f8e14497SBen Gardon 
1281f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
12828f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1283f8e14497SBen Gardon 	} else {
1284f8e14497SBen Gardon 		/*
1285f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1286f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1287f8e14497SBen Gardon 		 */
1288f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1289f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1290f8e14497SBen Gardon 
1291f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1292f8e14497SBen Gardon 	}
1293f8e14497SBen Gardon 
12943039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
129533dd3574SBen Gardon 
12963039bcc7SSean Christopherson 	return true;
1297f8e14497SBen Gardon }
1298f8e14497SBen Gardon 
12993039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1300f8e14497SBen Gardon {
13013039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1302f8e14497SBen Gardon }
1303f8e14497SBen Gardon 
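/* Return true if the GFN mapped by the SPTE has been accessed. */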
13043039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
13053039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1306f8e14497SBen Gardon {
13073039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1308f8e14497SBen Gardon }
1309f8e14497SBen Gardon 
13103039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1311f8e14497SBen Gardon {
13123039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
13133039bcc7SSean Christopherson }
13143039bcc7SSean Christopherson 
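/*
 * Zap the existing 4K SPTE for the GFN being remapped and, if the new host
 * PTE is read-only, immediately install a new SPTE pointing at the new PFN.
 */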
13153039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
13163039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
13173039bcc7SSean Christopherson {
13183039bcc7SSean Christopherson 	u64 new_spte;
13193039bcc7SSean Christopherson 
13203039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped. */
13213039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
13223039bcc7SSean Christopherson 
13233039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
13243039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
13253039bcc7SSean Christopherson 		return false;
13263039bcc7SSean Christopherson 
13273039bcc7SSean Christopherson 	/*
13283039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
13293039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
13303039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
13313039bcc7SSean Christopherson 	 * See __handle_changed_spte().
13323039bcc7SSean Christopherson 	 */
13333039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
13343039bcc7SSean Christopherson 
13353039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
13363039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
13373039bcc7SSean Christopherson 								  pte_pfn(range->pte));
13383039bcc7SSean Christopherson 
13393039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
13403039bcc7SSean Christopherson 	}
13413039bcc7SSean Christopherson 
13423039bcc7SSean Christopherson 	return true;
1343f8e14497SBen Gardon }
13441d8dd6b3SBen Gardon 
13451d8dd6b3SBen Gardon /*
13461d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
13471d8dd6b3SBen Gardon  * range->pte contains the new pte_t mapping the HVA specified by the MMU
13481d8dd6b3SBen Gardon  * notifier.
13491d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
13501d8dd6b3SBen Gardon  */
13513039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
13521d8dd6b3SBen Gardon {
135393fa50f6SSean Christopherson 	/*
135493fa50f6SSean Christopherson 	 * No need to handle the remote TLB flush under RCU protection, the
135593fa50f6SSean Christopherson 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
135693fa50f6SSean Christopherson 	 * shadow page.  See the WARN on pfn_changed in __handle_changed_spte().
135793fa50f6SSean Christopherson 	 */
135893fa50f6SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
13591d8dd6b3SBen Gardon }
13601d8dd6b3SBen Gardon 
1361a6a0b05dSBen Gardon /*
1362bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1363bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1364bedd9195SDavid Matlack  * be flushed.
1365a6a0b05dSBen Gardon  */
1366a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1367a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1368a6a0b05dSBen Gardon {
1369a6a0b05dSBen Gardon 	struct tdp_iter iter;
1370a6a0b05dSBen Gardon 	u64 new_spte;
1371a6a0b05dSBen Gardon 	bool spte_set = false;
1372a6a0b05dSBen Gardon 
13737cca2d0bSBen Gardon 	rcu_read_lock();
13747cca2d0bSBen Gardon 
1375a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1376a6a0b05dSBen Gardon 
137777aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
137824ae4cfaSBen Gardon retry:
137924ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
13801af4a960SBen Gardon 			continue;
13811af4a960SBen Gardon 
1382a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
13830f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
13840f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1385a6a0b05dSBen Gardon 			continue;
1386a6a0b05dSBen Gardon 
1387a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1388a6a0b05dSBen Gardon 
13893e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
139024ae4cfaSBen Gardon 			goto retry;
13913255530aSDavid Matlack 
1392a6a0b05dSBen Gardon 		spte_set = true;
1393a6a0b05dSBen Gardon 	}
13947cca2d0bSBen Gardon 
13957cca2d0bSBen Gardon 	rcu_read_unlock();
1396a6a0b05dSBen Gardon 	return spte_set;
1397a6a0b05dSBen Gardon }
1398a6a0b05dSBen Gardon 
1399a6a0b05dSBen Gardon /*
1400a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1401a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1402a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1403a6a0b05dSBen Gardon  */
1404269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1405269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1406a6a0b05dSBen Gardon {
1407a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1408a6a0b05dSBen Gardon 	bool spte_set = false;
1409a6a0b05dSBen Gardon 
141024ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1411a6a0b05dSBen Gardon 
1412d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1413a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1414a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1415a6a0b05dSBen Gardon 
1416a6a0b05dSBen Gardon 	return spte_set;
1417a6a0b05dSBen Gardon }
1418a6a0b05dSBen Gardon 
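/* Allocate a shadow page and its page table for splitting a huge page. */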
1419a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1420a3fe5dbdSDavid Matlack {
1421a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1422a3fe5dbdSDavid Matlack 
1423a3fe5dbdSDavid Matlack 	gfp |= __GFP_ZERO;
1424a3fe5dbdSDavid Matlack 
1425a3fe5dbdSDavid Matlack 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1426a3fe5dbdSDavid Matlack 	if (!sp)
1427a3fe5dbdSDavid Matlack 		return NULL;
1428a3fe5dbdSDavid Matlack 
1429a3fe5dbdSDavid Matlack 	sp->spt = (void *)__get_free_page(gfp);
1430a3fe5dbdSDavid Matlack 	if (!sp->spt) {
1431a3fe5dbdSDavid Matlack 		kmem_cache_free(mmu_page_header_cache, sp);
1432a3fe5dbdSDavid Matlack 		return NULL;
1433a3fe5dbdSDavid Matlack 	}
1434a3fe5dbdSDavid Matlack 
1435a3fe5dbdSDavid Matlack 	return sp;
1436a3fe5dbdSDavid Matlack }
1437a3fe5dbdSDavid Matlack 
1438a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1439cb00a70bSDavid Matlack 						       struct tdp_iter *iter,
1440cb00a70bSDavid Matlack 						       bool shared)
1441a3fe5dbdSDavid Matlack {
1442a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp;
1443a3fe5dbdSDavid Matlack 
1444a3fe5dbdSDavid Matlack 	/*
1445a3fe5dbdSDavid Matlack 	 * Since we are allocating while under the MMU lock we have to be
1446a3fe5dbdSDavid Matlack 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1447a3fe5dbdSDavid Matlack 	 * reclaim and to avoid making any filesystem callbacks (which can end
1448a3fe5dbdSDavid Matlack 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1449a3fe5dbdSDavid Matlack 	 *
1450a3fe5dbdSDavid Matlack 	 * If this allocation fails we drop the lock and retry with reclaim
1451a3fe5dbdSDavid Matlack 	 * allowed.
1452a3fe5dbdSDavid Matlack 	 */
1453a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1454a3fe5dbdSDavid Matlack 	if (sp)
1455a3fe5dbdSDavid Matlack 		return sp;
1456a3fe5dbdSDavid Matlack 
1457a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1458cb00a70bSDavid Matlack 
1459cb00a70bSDavid Matlack 	if (shared)
1460a3fe5dbdSDavid Matlack 		read_unlock(&kvm->mmu_lock);
1461cb00a70bSDavid Matlack 	else
1462cb00a70bSDavid Matlack 		write_unlock(&kvm->mmu_lock);
1463a3fe5dbdSDavid Matlack 
1464a3fe5dbdSDavid Matlack 	iter->yielded = true;
1465a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1466a3fe5dbdSDavid Matlack 
1467cb00a70bSDavid Matlack 	if (shared)
1468a3fe5dbdSDavid Matlack 		read_lock(&kvm->mmu_lock);
1469cb00a70bSDavid Matlack 	else
1470cb00a70bSDavid Matlack 		write_lock(&kvm->mmu_lock);
1471cb00a70bSDavid Matlack 
1472a3fe5dbdSDavid Matlack 	rcu_read_lock();
1473a3fe5dbdSDavid Matlack 
1474a3fe5dbdSDavid Matlack 	return sp;
1475a3fe5dbdSDavid Matlack }
1476a3fe5dbdSDavid Matlack 
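/*
 * Replace the huge SPTE at @iter with @sp, whose page table maps the same
 * range of GFNs at the next lower level.
 */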
1477cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1478cb00a70bSDavid Matlack 				   struct kvm_mmu_page *sp, bool shared)
1479a3fe5dbdSDavid Matlack {
1480a3fe5dbdSDavid Matlack 	const u64 huge_spte = iter->old_spte;
1481a3fe5dbdSDavid Matlack 	const int level = iter->level;
1482a3fe5dbdSDavid Matlack 	int ret, i;
1483a3fe5dbdSDavid Matlack 
1484a3fe5dbdSDavid Matlack 	tdp_mmu_init_child_sp(sp, iter);
1485a3fe5dbdSDavid Matlack 
1486a3fe5dbdSDavid Matlack 	/*
1487a3fe5dbdSDavid Matlack 	 * No need for atomics when writing to sp->spt since the page table has
1488a3fe5dbdSDavid Matlack 	 * not been linked in yet and thus is not reachable from any other CPU.
1489a3fe5dbdSDavid Matlack 	 */
14902ca3129eSSean Christopherson 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
149147855da0SDavid Matlack 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1492a3fe5dbdSDavid Matlack 
1493a3fe5dbdSDavid Matlack 	/*
1494a3fe5dbdSDavid Matlack 	 * Replace the huge spte with a pointer to the populated lower level
1495a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush vCPUs
1496a3fe5dbdSDavid Matlack 	 * will see a mix of the split mappings and the original huge mapping,
1497a3fe5dbdSDavid Matlack 	 * depending on what's currently in their TLB. This is fine from a
1498a3fe5dbdSDavid Matlack 	 * correctness standpoint since the translation will be the same either
1499a3fe5dbdSDavid Matlack 	 * way.
1500a3fe5dbdSDavid Matlack 	 */
1501cb00a70bSDavid Matlack 	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
1502a3fe5dbdSDavid Matlack 	if (ret)
1503e0b728b1SDavid Matlack 		goto out;
1504a3fe5dbdSDavid Matlack 
1505a3fe5dbdSDavid Matlack 	/*
1506a3fe5dbdSDavid Matlack 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1507a3fe5dbdSDavid Matlack 	 * are overwriting from the page stats. But we have to manually update
1508a3fe5dbdSDavid Matlack 	 * the page stats with the new present child pages.
1509a3fe5dbdSDavid Matlack 	 */
15102ca3129eSSean Christopherson 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1511a3fe5dbdSDavid Matlack 
1512e0b728b1SDavid Matlack out:
1513e0b728b1SDavid Matlack 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1514e0b728b1SDavid Matlack 	return ret;
1515a3fe5dbdSDavid Matlack }
1516a3fe5dbdSDavid Matlack 
1517a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1518a3fe5dbdSDavid Matlack 					 struct kvm_mmu_page *root,
1519a3fe5dbdSDavid Matlack 					 gfn_t start, gfn_t end,
1520cb00a70bSDavid Matlack 					 int target_level, bool shared)
1521a3fe5dbdSDavid Matlack {
1522a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *sp = NULL;
1523a3fe5dbdSDavid Matlack 	struct tdp_iter iter;
1524a3fe5dbdSDavid Matlack 	int ret = 0;
1525a3fe5dbdSDavid Matlack 
1526a3fe5dbdSDavid Matlack 	rcu_read_lock();
1527a3fe5dbdSDavid Matlack 
1528a3fe5dbdSDavid Matlack 	/*
1529a3fe5dbdSDavid Matlack 	 * Traverse the page table splitting all huge pages above the target
1530a3fe5dbdSDavid Matlack 	 * level into one lower level. For example, if we encounter a 1GB page
1531a3fe5dbdSDavid Matlack 	 * we split it into 512 2MB pages.
1532a3fe5dbdSDavid Matlack 	 *
1533a3fe5dbdSDavid Matlack 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1534a3fe5dbdSDavid Matlack 	 * to visit an SPTE before ever visiting its children, which means we
1535a3fe5dbdSDavid Matlack 	 * will correctly recursively split huge pages that are more than one
1536a3fe5dbdSDavid Matlack 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1537a3fe5dbdSDavid Matlack 	 * and then splitting each of those to 512 4KB pages).
1538a3fe5dbdSDavid Matlack 	 */
1539a3fe5dbdSDavid Matlack 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1540a3fe5dbdSDavid Matlack retry:
1541cb00a70bSDavid Matlack 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1542a3fe5dbdSDavid Matlack 			continue;
1543a3fe5dbdSDavid Matlack 
1544a3fe5dbdSDavid Matlack 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1545a3fe5dbdSDavid Matlack 			continue;
1546a3fe5dbdSDavid Matlack 
1547a3fe5dbdSDavid Matlack 		if (!sp) {
1548cb00a70bSDavid Matlack 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1549a3fe5dbdSDavid Matlack 			if (!sp) {
1550a3fe5dbdSDavid Matlack 				ret = -ENOMEM;
1551e0b728b1SDavid Matlack 				trace_kvm_mmu_split_huge_page(iter.gfn,
1552e0b728b1SDavid Matlack 							      iter.old_spte,
1553e0b728b1SDavid Matlack 							      iter.level, ret);
1554a3fe5dbdSDavid Matlack 				break;
1555a3fe5dbdSDavid Matlack 			}
1556a3fe5dbdSDavid Matlack 
1557a3fe5dbdSDavid Matlack 			if (iter.yielded)
1558a3fe5dbdSDavid Matlack 				continue;
1559a3fe5dbdSDavid Matlack 		}
1560a3fe5dbdSDavid Matlack 
1561cb00a70bSDavid Matlack 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1562a3fe5dbdSDavid Matlack 			goto retry;
1563a3fe5dbdSDavid Matlack 
1564a3fe5dbdSDavid Matlack 		sp = NULL;
1565a3fe5dbdSDavid Matlack 	}
1566a3fe5dbdSDavid Matlack 
1567a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1568a3fe5dbdSDavid Matlack 
1569a3fe5dbdSDavid Matlack 	/*
1570a3fe5dbdSDavid Matlack 	 * It's possible to exit the loop having never used the last sp if, for
1571a3fe5dbdSDavid Matlack 	 * example, a vCPU doing HugePage NX splitting wins the race and
1572a3fe5dbdSDavid Matlack 	 * installs its own sp in place of the last sp we tried to split.
1573a3fe5dbdSDavid Matlack 	 */
1574a3fe5dbdSDavid Matlack 	if (sp)
1575a3fe5dbdSDavid Matlack 		tdp_mmu_free_sp(sp);
1576a3fe5dbdSDavid Matlack 
1577a3fe5dbdSDavid Matlack 	return ret;
1578a3fe5dbdSDavid Matlack }
1579a3fe5dbdSDavid Matlack 
1580cb00a70bSDavid Matlack 
1581a3fe5dbdSDavid Matlack /*
1582a3fe5dbdSDavid Matlack  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1583a3fe5dbdSDavid Matlack  */
1584a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1585a3fe5dbdSDavid Matlack 				      const struct kvm_memory_slot *slot,
1586a3fe5dbdSDavid Matlack 				      gfn_t start, gfn_t end,
1587cb00a70bSDavid Matlack 				      int target_level, bool shared)
1588a3fe5dbdSDavid Matlack {
1589a3fe5dbdSDavid Matlack 	struct kvm_mmu_page *root;
1590a3fe5dbdSDavid Matlack 	int r = 0;
1591a3fe5dbdSDavid Matlack 
1592cb00a70bSDavid Matlack 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1593a3fe5dbdSDavid Matlack 
15947c554d8eSPaolo Bonzini 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1595cb00a70bSDavid Matlack 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1596a3fe5dbdSDavid Matlack 		if (r) {
1597cb00a70bSDavid Matlack 			kvm_tdp_mmu_put_root(kvm, root, shared);
1598a3fe5dbdSDavid Matlack 			break;
1599a3fe5dbdSDavid Matlack 		}
1600a3fe5dbdSDavid Matlack 	}
1601a3fe5dbdSDavid Matlack }
1602a3fe5dbdSDavid Matlack 
1603a6a0b05dSBen Gardon /*
1604a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1605a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1606a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1607a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1608a6a0b05dSBen Gardon  * be flushed.
1609a6a0b05dSBen Gardon  */
1610a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1611a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1612a6a0b05dSBen Gardon {
1613a6a0b05dSBen Gardon 	struct tdp_iter iter;
1614a6a0b05dSBen Gardon 	u64 new_spte;
1615a6a0b05dSBen Gardon 	bool spte_set = false;
1616a6a0b05dSBen Gardon 
16177cca2d0bSBen Gardon 	rcu_read_lock();
16187cca2d0bSBen Gardon 
1619a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
162024ae4cfaSBen Gardon retry:
162124ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
16221af4a960SBen Gardon 			continue;
16231af4a960SBen Gardon 
16243354ef5aSSean Christopherson 		if (!is_shadow_present_pte(iter.old_spte))
16253354ef5aSSean Christopherson 			continue;
16263354ef5aSSean Christopherson 
1627a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1628a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1629a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1630a6a0b05dSBen Gardon 			else
1631a6a0b05dSBen Gardon 				continue;
1632a6a0b05dSBen Gardon 		} else {
1633a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1634a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1635a6a0b05dSBen Gardon 			else
1636a6a0b05dSBen Gardon 				continue;
1637a6a0b05dSBen Gardon 		}
1638a6a0b05dSBen Gardon 
16393e72c791SDavid Matlack 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
164024ae4cfaSBen Gardon 			goto retry;
16413255530aSDavid Matlack 
1642a6a0b05dSBen Gardon 		spte_set = true;
1643a6a0b05dSBen Gardon 	}
16447cca2d0bSBen Gardon 
16457cca2d0bSBen Gardon 	rcu_read_unlock();
1646a6a0b05dSBen Gardon 	return spte_set;
1647a6a0b05dSBen Gardon }
1648a6a0b05dSBen Gardon 
1649a6a0b05dSBen Gardon /*
1650a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1651a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1652a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1653a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1654a6a0b05dSBen Gardon  * be flushed.
1655a6a0b05dSBen Gardon  */
1656269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1657269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1658a6a0b05dSBen Gardon {
1659a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1660a6a0b05dSBen Gardon 	bool spte_set = false;
1661a6a0b05dSBen Gardon 
166224ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1663a6a0b05dSBen Gardon 
1664d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1665a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1666a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1667a6a0b05dSBen Gardon 
1668a6a0b05dSBen Gardon 	return spte_set;
1669a6a0b05dSBen Gardon }
1670a6a0b05dSBen Gardon 
1671a6a0b05dSBen Gardon /*
1672a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1673a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1674a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1675a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1676a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1677a6a0b05dSBen Gardon  */
1678a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1679a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1680a6a0b05dSBen Gardon {
1681a6a0b05dSBen Gardon 	struct tdp_iter iter;
1682a6a0b05dSBen Gardon 	u64 new_spte;
1683a6a0b05dSBen Gardon 
16847cca2d0bSBen Gardon 	rcu_read_lock();
16857cca2d0bSBen Gardon 
1686a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1687a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1688a6a0b05dSBen Gardon 		if (!mask)
1689a6a0b05dSBen Gardon 			break;
1690a6a0b05dSBen Gardon 
1691a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1692a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1693a6a0b05dSBen Gardon 			continue;
1694a6a0b05dSBen Gardon 
1695f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1696f1b3b06aSBen Gardon 
1697a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1698a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1699a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1700a6a0b05dSBen Gardon 			else
1701a6a0b05dSBen Gardon 				continue;
1702a6a0b05dSBen Gardon 		} else {
1703a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1704a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1705a6a0b05dSBen Gardon 			else
1706a6a0b05dSBen Gardon 				continue;
1707a6a0b05dSBen Gardon 		}
1708a6a0b05dSBen Gardon 
1709a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1710a6a0b05dSBen Gardon 	}
17117cca2d0bSBen Gardon 
17127cca2d0bSBen Gardon 	rcu_read_unlock();
1713a6a0b05dSBen Gardon }
1714a6a0b05dSBen Gardon 
1715a6a0b05dSBen Gardon /*
1716a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1717a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1718a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1719a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1720a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1721a6a0b05dSBen Gardon  */
1722a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1723a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1724a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1725a6a0b05dSBen Gardon 				       bool wrprot)
1726a6a0b05dSBen Gardon {
1727a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1728a6a0b05dSBen Gardon 
1729531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1730a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1731a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1732a6a0b05dSBen Gardon }
1733a6a0b05dSBen Gardon 
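/* Zap non-leaf SPTEs in the slot that could be replaced by huge pages. */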
17344b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
173514881998SBen Gardon 				       struct kvm_mmu_page *root,
17364b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
173714881998SBen Gardon {
17389eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
17399eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
174014881998SBen Gardon 	struct tdp_iter iter;
17415ba7c4c6SBen Gardon 	int max_mapping_level;
174214881998SBen Gardon 
17437cca2d0bSBen Gardon 	rcu_read_lock();
17447cca2d0bSBen Gardon 
174585f44f8cSSean Christopherson 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
174685f44f8cSSean Christopherson retry:
17474b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
17481af4a960SBen Gardon 			continue;
17491af4a960SBen Gardon 
175085f44f8cSSean Christopherson 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
175185f44f8cSSean Christopherson 		    !is_shadow_present_pte(iter.old_spte))
175285f44f8cSSean Christopherson 			continue;
175385f44f8cSSean Christopherson 
175485f44f8cSSean Christopherson 		/*
175585f44f8cSSean Christopherson 		 * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with
175685f44f8cSSean Christopherson 		 * a large page size, then its parent would have been zapped
175785f44f8cSSean Christopherson 		 * instead of stepping down.
175885f44f8cSSean Christopherson 		 */
175985f44f8cSSean Christopherson 		if (is_last_spte(iter.old_spte, iter.level))
176085f44f8cSSean Christopherson 			continue;
176185f44f8cSSean Christopherson 
176285f44f8cSSean Christopherson 		/*
176385f44f8cSSean Christopherson 		 * If iter.gfn resides outside of the slot, i.e. the page for
176485f44f8cSSean Christopherson 		 * the current level overlaps but is not contained by the slot,
176585f44f8cSSean Christopherson 		 * then the SPTE can't be made huge.  More importantly, trying
176685f44f8cSSean Christopherson 		 * to query that info from slot->arch.lpage_info will cause an
176785f44f8cSSean Christopherson 		 * out-of-bounds access.
176885f44f8cSSean Christopherson 		 */
176985f44f8cSSean Christopherson 		if (iter.gfn < start || iter.gfn >= end)
177014881998SBen Gardon 			continue;
177114881998SBen Gardon 
17725ba7c4c6SBen Gardon 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1773a8ac499bSSean Christopherson 							      iter.gfn, PG_LEVEL_NUM);
177485f44f8cSSean Christopherson 		if (max_mapping_level < iter.level)
17755ba7c4c6SBen Gardon 			continue;
17765ba7c4c6SBen Gardon 
17774b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
177885f44f8cSSean Christopherson 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
177985f44f8cSSean Christopherson 			goto retry;
17802db6f772SBen Gardon 	}
178114881998SBen Gardon 
17827cca2d0bSBen Gardon 	rcu_read_unlock();
178314881998SBen Gardon }
178414881998SBen Gardon 
178514881998SBen Gardon /*
178685f44f8cSSean Christopherson  * Zap non-leaf SPTEs (and free their associated page tables) which could
178785f44f8cSSean Christopherson  * be replaced by huge pages, for GFNs within the slot.
178814881998SBen Gardon  */
17894b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
17904b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
179114881998SBen Gardon {
179214881998SBen Gardon 	struct kvm_mmu_page *root;
179314881998SBen Gardon 
17942db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
179514881998SBen Gardon 
1796d62007edSSean Christopherson 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
17974b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
179814881998SBen Gardon }
179946044f72SBen Gardon 
180046044f72SBen Gardon /*
180146044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
18025fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
180346044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
180446044f72SBen Gardon  */
180546044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
18063ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
180746044f72SBen Gardon {
180846044f72SBen Gardon 	struct tdp_iter iter;
180946044f72SBen Gardon 	u64 new_spte;
181046044f72SBen Gardon 	bool spte_set = false;
181146044f72SBen Gardon 
18123ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
18133ad93562SKeqian Zhu 
18147cca2d0bSBen Gardon 	rcu_read_lock();
18157cca2d0bSBen Gardon 
181677aa6075SDavid Matlack 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
18173ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
18183ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
18193ad93562SKeqian Zhu 			continue;
18203ad93562SKeqian Zhu 
182146044f72SBen Gardon 		new_spte = iter.old_spte &
18225fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
182346044f72SBen Gardon 
18247c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
18257c8a4742SDavid Matlack 			break;
18267c8a4742SDavid Matlack 
182746044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
182846044f72SBen Gardon 		spte_set = true;
182946044f72SBen Gardon 	}
183046044f72SBen Gardon 
18317cca2d0bSBen Gardon 	rcu_read_unlock();
18327cca2d0bSBen Gardon 
183346044f72SBen Gardon 	return spte_set;
183446044f72SBen Gardon }
183546044f72SBen Gardon 
183646044f72SBen Gardon /*
183746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
18385fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
183946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
184046044f72SBen Gardon  */
184146044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
18423ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
18433ad93562SKeqian Zhu 				   int min_level)
184446044f72SBen Gardon {
184546044f72SBen Gardon 	struct kvm_mmu_page *root;
184646044f72SBen Gardon 	bool spte_set = false;
184746044f72SBen Gardon 
1848531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1849a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
18503ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1851a3f15bdaSSean Christopherson 
185246044f72SBen Gardon 	return spte_set;
185346044f72SBen Gardon }
185446044f72SBen Gardon 
185595fb5b02SBen Gardon /*
185695fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
185795fb5b02SBen Gardon  * That SPTE may be non-present.
1858c5c8c7c5SDavid Matlack  *
1859c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
186095fb5b02SBen Gardon  */
186139b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
186239b4d43eSSean Christopherson 			 int *root_level)
186395fb5b02SBen Gardon {
186495fb5b02SBen Gardon 	struct tdp_iter iter;
186595fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
186695fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
18672aa07893SSean Christopherson 	int leaf = -1;
186895fb5b02SBen Gardon 
1869a972e29cSPaolo Bonzini 	*root_level = vcpu->arch.mmu->root_role.level;
187095fb5b02SBen Gardon 
187195fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
187295fb5b02SBen Gardon 		leaf = iter.level;
1873dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
187495fb5b02SBen Gardon 	}
187595fb5b02SBen Gardon 
187695fb5b02SBen Gardon 	return leaf;
187795fb5b02SBen Gardon }
18786e8eb206SDavid Matlack 
18796e8eb206SDavid Matlack /*
18806e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
18816e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
18826e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
18836e8eb206SDavid Matlack  *
18846e8eb206SDavid Matlack  * Contract:
18856e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
18866e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
18876e8eb206SDavid Matlack  *
18886e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
18896e8eb206SDavid Matlack  */
18906e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
18916e8eb206SDavid Matlack 					u64 *spte)
18926e8eb206SDavid Matlack {
18936e8eb206SDavid Matlack 	struct tdp_iter iter;
18946e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
18956e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
18966e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
18976e8eb206SDavid Matlack 
18986e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
18996e8eb206SDavid Matlack 		*spte = iter.old_spte;
19006e8eb206SDavid Matlack 		sptep = iter.sptep;
19016e8eb206SDavid Matlack 	}
19026e8eb206SDavid Matlack 
19036e8eb206SDavid Matlack 	/*
19046e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
19056e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
19066e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
19076e8eb206SDavid Matlack 	 * annotation.
19086e8eb206SDavid Matlack 	 *
19096e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
19106e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
19116e8eb206SDavid Matlack 	 * outside of mmu_lock.
19126e8eb206SDavid Matlack 	 */
19136e8eb206SDavid Matlack 	return rcu_dereference(sptep);
19146e8eb206SDavid Matlack }
1915