xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 3849e092)
1fe5db27dSBen Gardon // SPDX-License-Identifier: GPL-2.0
2fe5db27dSBen Gardon 
302c00b3aSBen Gardon #include "mmu.h"
402c00b3aSBen Gardon #include "mmu_internal.h"
5bb18842eSBen Gardon #include "mmutrace.h"
62f2fad08SBen Gardon #include "tdp_iter.h"
7fe5db27dSBen Gardon #include "tdp_mmu.h"
802c00b3aSBen Gardon #include "spte.h"
9fe5db27dSBen Gardon 
109a77daacSBen Gardon #include <asm/cmpxchg.h>
1133dd3574SBen Gardon #include <trace/events/kvm.h>
1233dd3574SBen Gardon 
13fe5db27dSBen Gardon static bool __read_mostly tdp_mmu_enabled = false;
1495fb5b02SBen Gardon module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15fe5db27dSBen Gardon 
16fe5db27dSBen Gardon /* Initializes the TDP MMU for the VM, if enabled. */
17fe5db27dSBen Gardon void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18fe5db27dSBen Gardon {
19897218ffSPaolo Bonzini 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20fe5db27dSBen Gardon 		return;
21fe5db27dSBen Gardon 
22fe5db27dSBen Gardon 	/* This should not be changed for the lifetime of the VM. */
23fe5db27dSBen Gardon 	kvm->arch.tdp_mmu_enabled = true;
2402c00b3aSBen Gardon 
2502c00b3aSBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
269a77daacSBen Gardon 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2789c0fd49SBen Gardon 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28fe5db27dSBen Gardon }
29fe5db27dSBen Gardon 
30fe5db27dSBen Gardon void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
31fe5db27dSBen Gardon {
32fe5db27dSBen Gardon 	if (!kvm->arch.tdp_mmu_enabled)
33fe5db27dSBen Gardon 		return;
3402c00b3aSBen Gardon 
3502c00b3aSBen Gardon 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
367cca2d0bSBen Gardon 
377cca2d0bSBen Gardon 	/*
387cca2d0bSBen Gardon 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
397cca2d0bSBen Gardon 	 * can run before the VM is torn down.
407cca2d0bSBen Gardon 	 */
417cca2d0bSBen Gardon 	rcu_barrier();
4202c00b3aSBen Gardon }
4302c00b3aSBen Gardon 
44a889ea54SBen Gardon static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
45a889ea54SBen Gardon {
46a889ea54SBen Gardon 	if (kvm_mmu_put_root(kvm, root))
47a889ea54SBen Gardon 		kvm_tdp_mmu_free_root(kvm, root);
48a889ea54SBen Gardon }
49a889ea54SBen Gardon 
50a889ea54SBen Gardon static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
51a889ea54SBen Gardon 					   struct kvm_mmu_page *root)
52a889ea54SBen Gardon {
53531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
54a889ea54SBen Gardon 
55a889ea54SBen Gardon 	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
56a889ea54SBen Gardon 		return false;
57a889ea54SBen Gardon 
58a889ea54SBen Gardon 	kvm_mmu_get_root(kvm, root);
59a889ea54SBen Gardon 	return true;
60a889ea54SBen Gardon 
61a889ea54SBen Gardon }
62a889ea54SBen Gardon 
63a889ea54SBen Gardon static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
64a889ea54SBen Gardon 						     struct kvm_mmu_page *root)
65a889ea54SBen Gardon {
66a889ea54SBen Gardon 	struct kvm_mmu_page *next_root;
67a889ea54SBen Gardon 
68a889ea54SBen Gardon 	next_root = list_next_entry(root, link);
69a889ea54SBen Gardon 	tdp_mmu_put_root(kvm, root);
70a889ea54SBen Gardon 	return next_root;
71a889ea54SBen Gardon }
72a889ea54SBen Gardon 
73a889ea54SBen Gardon /*
74a889ea54SBen Gardon  * Note: this iterator gets and puts references to the roots it iterates over.
75a889ea54SBen Gardon  * This makes it safe to release the MMU lock and yield within the loop, but
76a889ea54SBen Gardon  * if exiting the loop early, the caller must drop the reference to the most
77a889ea54SBen Gardon  * recent root. (Unless keeping a live reference is desirable.)
78a889ea54SBen Gardon  */
79a889ea54SBen Gardon #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
80a889ea54SBen Gardon 	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
81a889ea54SBen Gardon 				      typeof(*_root), link);		\
82a889ea54SBen Gardon 	     tdp_mmu_next_root_valid(_kvm, _root);			\
83a889ea54SBen Gardon 	     _root = tdp_mmu_next_root(_kvm, _root))
84a889ea54SBen Gardon 
8502c00b3aSBen Gardon #define for_each_tdp_mmu_root(_kvm, _root)				\
8602c00b3aSBen Gardon 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
8702c00b3aSBen Gardon 
88faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
89063afacdSBen Gardon 			  gfn_t start, gfn_t end, bool can_yield);
90faaf05b0SBen Gardon 
9102c00b3aSBen Gardon void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
9202c00b3aSBen Gardon {
93339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
94faaf05b0SBen Gardon 
95531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
9602c00b3aSBen Gardon 
9702c00b3aSBen Gardon 	WARN_ON(root->root_count);
9802c00b3aSBen Gardon 	WARN_ON(!root->tdp_mmu_page);
9902c00b3aSBen Gardon 
10002c00b3aSBen Gardon 	list_del(&root->link);
10102c00b3aSBen Gardon 
102063afacdSBen Gardon 	zap_gfn_range(kvm, root, 0, max_gfn, false);
103faaf05b0SBen Gardon 
10402c00b3aSBen Gardon 	free_page((unsigned long)root->spt);
10502c00b3aSBen Gardon 	kmem_cache_free(mmu_page_header_cache, root);
10602c00b3aSBen Gardon }
10702c00b3aSBen Gardon 
10802c00b3aSBen Gardon static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
10902c00b3aSBen Gardon 						   int level)
11002c00b3aSBen Gardon {
11102c00b3aSBen Gardon 	union kvm_mmu_page_role role;
11202c00b3aSBen Gardon 
11302c00b3aSBen Gardon 	role = vcpu->arch.mmu->mmu_role.base;
11402c00b3aSBen Gardon 	role.level = level;
11502c00b3aSBen Gardon 	role.direct = true;
11602c00b3aSBen Gardon 	role.gpte_is_8_bytes = true;
11702c00b3aSBen Gardon 	role.access = ACC_ALL;
11802c00b3aSBen Gardon 
11902c00b3aSBen Gardon 	return role;
12002c00b3aSBen Gardon }
12102c00b3aSBen Gardon 
12202c00b3aSBen Gardon static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
12302c00b3aSBen Gardon 					       int level)
12402c00b3aSBen Gardon {
12502c00b3aSBen Gardon 	struct kvm_mmu_page *sp;
12602c00b3aSBen Gardon 
12702c00b3aSBen Gardon 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
12802c00b3aSBen Gardon 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
12902c00b3aSBen Gardon 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
13002c00b3aSBen Gardon 
13102c00b3aSBen Gardon 	sp->role.word = page_role_for_level(vcpu, level).word;
13202c00b3aSBen Gardon 	sp->gfn = gfn;
13302c00b3aSBen Gardon 	sp->tdp_mmu_page = true;
13402c00b3aSBen Gardon 
13533dd3574SBen Gardon 	trace_kvm_mmu_get_page(sp, true);
13633dd3574SBen Gardon 
13702c00b3aSBen Gardon 	return sp;
13802c00b3aSBen Gardon }
13902c00b3aSBen Gardon 
1406e6ec584SSean Christopherson hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
14102c00b3aSBen Gardon {
14202c00b3aSBen Gardon 	union kvm_mmu_page_role role;
14302c00b3aSBen Gardon 	struct kvm *kvm = vcpu->kvm;
14402c00b3aSBen Gardon 	struct kvm_mmu_page *root;
14502c00b3aSBen Gardon 
1466e6ec584SSean Christopherson 	lockdep_assert_held_write(&kvm->mmu_lock);
14702c00b3aSBen Gardon 
1486e6ec584SSean Christopherson 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
14902c00b3aSBen Gardon 
15002c00b3aSBen Gardon 	/* Check for an existing root before allocating a new one. */
15102c00b3aSBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
15202c00b3aSBen Gardon 		if (root->role.word == role.word) {
15302c00b3aSBen Gardon 			kvm_mmu_get_root(kvm, root);
1546e6ec584SSean Christopherson 			goto out;
15502c00b3aSBen Gardon 		}
15602c00b3aSBen Gardon 	}
15702c00b3aSBen Gardon 
15802c00b3aSBen Gardon 	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
15902c00b3aSBen Gardon 	root->root_count = 1;
16002c00b3aSBen Gardon 
16102c00b3aSBen Gardon 	list_add(&root->link, &kvm->arch.tdp_mmu_roots);
16202c00b3aSBen Gardon 
1636e6ec584SSean Christopherson out:
16402c00b3aSBen Gardon 	return __pa(root->spt);
165fe5db27dSBen Gardon }
1662f2fad08SBen Gardon 
1677cca2d0bSBen Gardon static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
1687cca2d0bSBen Gardon {
1697cca2d0bSBen Gardon 	free_page((unsigned long)sp->spt);
1707cca2d0bSBen Gardon 	kmem_cache_free(mmu_page_header_cache, sp);
1717cca2d0bSBen Gardon }
1727cca2d0bSBen Gardon 
1737cca2d0bSBen Gardon /*
1747cca2d0bSBen Gardon  * This is called through call_rcu in order to free TDP page table memory
1757cca2d0bSBen Gardon  * safely with respect to other kernel threads that may be operating on
1767cca2d0bSBen Gardon  * the memory.
1777cca2d0bSBen Gardon  * By only accessing TDP MMU page table memory in an RCU read critical
1787cca2d0bSBen Gardon  * section, and freeing it after a grace period, lockless access to that
1797cca2d0bSBen Gardon  * memory won't use it after it is freed.
1807cca2d0bSBen Gardon  */
1817cca2d0bSBen Gardon static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
1827cca2d0bSBen Gardon {
1837cca2d0bSBen Gardon 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
1847cca2d0bSBen Gardon 					       rcu_head);
1857cca2d0bSBen Gardon 
1867cca2d0bSBen Gardon 	tdp_mmu_free_sp(sp);
1877cca2d0bSBen Gardon }
1887cca2d0bSBen Gardon 
1892f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1909a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
1919a77daacSBen Gardon 				bool shared);
1922f2fad08SBen Gardon 
193faaf05b0SBen Gardon static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
194faaf05b0SBen Gardon {
195faaf05b0SBen Gardon 	return sp->role.smm ? 1 : 0;
196faaf05b0SBen Gardon }
197faaf05b0SBen Gardon 
198f8e14497SBen Gardon static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
199f8e14497SBen Gardon {
200f8e14497SBen Gardon 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
201f8e14497SBen Gardon 		return;
202f8e14497SBen Gardon 
203f8e14497SBen Gardon 	if (is_accessed_spte(old_spte) &&
20464bb2769SSean Christopherson 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
20564bb2769SSean Christopherson 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
206f8e14497SBen Gardon 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
207f8e14497SBen Gardon }
208f8e14497SBen Gardon 
209a6a0b05dSBen Gardon static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
210a6a0b05dSBen Gardon 					  u64 old_spte, u64 new_spte, int level)
211a6a0b05dSBen Gardon {
212a6a0b05dSBen Gardon 	bool pfn_changed;
213a6a0b05dSBen Gardon 	struct kvm_memory_slot *slot;
214a6a0b05dSBen Gardon 
215a6a0b05dSBen Gardon 	if (level > PG_LEVEL_4K)
216a6a0b05dSBen Gardon 		return;
217a6a0b05dSBen Gardon 
218a6a0b05dSBen Gardon 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
219a6a0b05dSBen Gardon 
220a6a0b05dSBen Gardon 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
221a6a0b05dSBen Gardon 	    is_writable_pte(new_spte)) {
222a6a0b05dSBen Gardon 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
223fb04a1edSPeter Xu 		mark_page_dirty_in_slot(kvm, slot, gfn);
224a6a0b05dSBen Gardon 	}
225a6a0b05dSBen Gardon }
226a6a0b05dSBen Gardon 
2272f2fad08SBen Gardon /**
228a9442f59SBen Gardon  * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
229a9442f59SBen Gardon  *
230a9442f59SBen Gardon  * @kvm: kvm instance
231a9442f59SBen Gardon  * @sp: the new page
2329a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2339a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2349a77daacSBen Gardon  *	    threads that might be adding or removing pages.
235a9442f59SBen Gardon  * @account_nx: This page replaces a NX large page and should be marked for
236a9442f59SBen Gardon  *		eventual reclaim.
237a9442f59SBen Gardon  */
238a9442f59SBen Gardon static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2399a77daacSBen Gardon 			      bool shared, bool account_nx)
240a9442f59SBen Gardon {
2419a77daacSBen Gardon 	if (shared)
2429a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2439a77daacSBen Gardon 	else
244a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
245a9442f59SBen Gardon 
246a9442f59SBen Gardon 	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
247a9442f59SBen Gardon 	if (account_nx)
248a9442f59SBen Gardon 		account_huge_nx_page(kvm, sp);
2499a77daacSBen Gardon 
2509a77daacSBen Gardon 	if (shared)
2519a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
252a9442f59SBen Gardon }
253a9442f59SBen Gardon 
254a9442f59SBen Gardon /**
255a9442f59SBen Gardon  * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
256a9442f59SBen Gardon  *
257a9442f59SBen Gardon  * @kvm: kvm instance
258a9442f59SBen Gardon  * @sp: the page to be removed
2599a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
2609a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
2619a77daacSBen Gardon  *	    threads that might be adding or removing pages.
262a9442f59SBen Gardon  */
2639a77daacSBen Gardon static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2649a77daacSBen Gardon 				bool shared)
265a9442f59SBen Gardon {
2669a77daacSBen Gardon 	if (shared)
2679a77daacSBen Gardon 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
2689a77daacSBen Gardon 	else
269a9442f59SBen Gardon 		lockdep_assert_held_write(&kvm->mmu_lock);
270a9442f59SBen Gardon 
271a9442f59SBen Gardon 	list_del(&sp->link);
272a9442f59SBen Gardon 	if (sp->lpage_disallowed)
273a9442f59SBen Gardon 		unaccount_huge_nx_page(kvm, sp);
2749a77daacSBen Gardon 
2759a77daacSBen Gardon 	if (shared)
2769a77daacSBen Gardon 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
277a9442f59SBen Gardon }
278a9442f59SBen Gardon 
279a9442f59SBen Gardon /**
280a066e61fSBen Gardon  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
281a066e61fSBen Gardon  *
282a066e61fSBen Gardon  * @kvm: kvm instance
283a066e61fSBen Gardon  * @pt: the page removed from the paging structure
2849a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use
2859a77daacSBen Gardon  *	    of the MMU lock and the operation must synchronize with other
2869a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
287a066e61fSBen Gardon  *
288a066e61fSBen Gardon  * Given a page table that has been removed from the TDP paging structure,
289a066e61fSBen Gardon  * iterates through the page table to clear SPTEs and free child page tables.
290a066e61fSBen Gardon  */
2919a77daacSBen Gardon static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
2929a77daacSBen Gardon 					bool shared)
293a066e61fSBen Gardon {
294a066e61fSBen Gardon 	struct kvm_mmu_page *sp = sptep_to_sp(pt);
295a066e61fSBen Gardon 	int level = sp->role.level;
296e25f0e0cSBen Gardon 	gfn_t base_gfn = sp->gfn;
297a066e61fSBen Gardon 	u64 old_child_spte;
2989a77daacSBen Gardon 	u64 *sptep;
299e25f0e0cSBen Gardon 	gfn_t gfn;
300a066e61fSBen Gardon 	int i;
301a066e61fSBen Gardon 
302a066e61fSBen Gardon 	trace_kvm_mmu_prepare_zap_page(sp);
303a066e61fSBen Gardon 
3049a77daacSBen Gardon 	tdp_mmu_unlink_page(kvm, sp, shared);
305a066e61fSBen Gardon 
306a066e61fSBen Gardon 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
3079a77daacSBen Gardon 		sptep = pt + i;
308e25f0e0cSBen Gardon 		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
3099a77daacSBen Gardon 
3109a77daacSBen Gardon 		if (shared) {
311e25f0e0cSBen Gardon 			/*
312e25f0e0cSBen Gardon 			 * Set the SPTE to a nonpresent value that other
313e25f0e0cSBen Gardon 			 * threads will not overwrite. If the SPTE was
314e25f0e0cSBen Gardon 			 * already marked as removed then another thread
315e25f0e0cSBen Gardon 			 * handling a page fault could overwrite it, so
316e25f0e0cSBen Gardon 			 * set the SPTE until it is set from some other
317e25f0e0cSBen Gardon 			 * value to the removed SPTE value.
318e25f0e0cSBen Gardon 			 */
319e25f0e0cSBen Gardon 			for (;;) {
320e25f0e0cSBen Gardon 				old_child_spte = xchg(sptep, REMOVED_SPTE);
321e25f0e0cSBen Gardon 				if (!is_removed_spte(old_child_spte))
322e25f0e0cSBen Gardon 					break;
323e25f0e0cSBen Gardon 				cpu_relax();
324e25f0e0cSBen Gardon 			}
3259a77daacSBen Gardon 		} else {
3268df9f1afSSean Christopherson 			/*
3278df9f1afSSean Christopherson 			 * If the SPTE is not MMU-present, there is no backing
3288df9f1afSSean Christopherson 			 * page associated with the SPTE and so no side effects
3298df9f1afSSean Christopherson 			 * that need to be recorded, and exclusive ownership of
3308df9f1afSSean Christopherson 			 * mmu_lock ensures the SPTE can't be made present.
3318df9f1afSSean Christopherson 			 * Note, zapping MMIO SPTEs is also unnecessary as they
3328df9f1afSSean Christopherson 			 * are guarded by the memslots generation, not by being
3338df9f1afSSean Christopherson 			 * unreachable.
3348df9f1afSSean Christopherson 			 */
3359a77daacSBen Gardon 			old_child_spte = READ_ONCE(*sptep);
3368df9f1afSSean Christopherson 			if (!is_shadow_present_pte(old_child_spte))
3378df9f1afSSean Christopherson 				continue;
338e25f0e0cSBen Gardon 
339e25f0e0cSBen Gardon 			/*
340e25f0e0cSBen Gardon 			 * Marking the SPTE as a removed SPTE is not
341e25f0e0cSBen Gardon 			 * strictly necessary here as the MMU lock will
342e25f0e0cSBen Gardon 			 * stop other threads from concurrently modifying
343e25f0e0cSBen Gardon 			 * this SPTE. Using the removed SPTE value keeps
344e25f0e0cSBen Gardon 			 * the two branches consistent and simplifies
345e25f0e0cSBen Gardon 			 * the function.
346e25f0e0cSBen Gardon 			 */
347e25f0e0cSBen Gardon 			WRITE_ONCE(*sptep, REMOVED_SPTE);
3489a77daacSBen Gardon 		}
349e25f0e0cSBen Gardon 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
350e25f0e0cSBen Gardon 				    old_child_spte, REMOVED_SPTE, level - 1,
351e25f0e0cSBen Gardon 				    shared);
352a066e61fSBen Gardon 	}
353a066e61fSBen Gardon 
354a066e61fSBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, gfn,
355a066e61fSBen Gardon 					   KVM_PAGES_PER_HPAGE(level));
356a066e61fSBen Gardon 
3577cca2d0bSBen Gardon 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
358a066e61fSBen Gardon }
359a066e61fSBen Gardon 
360a066e61fSBen Gardon /**
3612f2fad08SBen Gardon  * handle_changed_spte - handle bookkeeping associated with an SPTE change
3622f2fad08SBen Gardon  * @kvm: kvm instance
3632f2fad08SBen Gardon  * @as_id: the address space of the paging structure the SPTE was a part of
3642f2fad08SBen Gardon  * @gfn: the base GFN that was mapped by the SPTE
3652f2fad08SBen Gardon  * @old_spte: The value of the SPTE before the change
3662f2fad08SBen Gardon  * @new_spte: The value of the SPTE after the change
3672f2fad08SBen Gardon  * @level: the level of the PT the SPTE is part of in the paging structure
3689a77daacSBen Gardon  * @shared: This operation may not be running under the exclusive use of
3699a77daacSBen Gardon  *	    the MMU lock and the operation must synchronize with other
3709a77daacSBen Gardon  *	    threads that might be modifying SPTEs.
3712f2fad08SBen Gardon  *
3722f2fad08SBen Gardon  * Handle bookkeeping that might result from the modification of a SPTE.
3732f2fad08SBen Gardon  * This function must be called for all TDP SPTE modifications.
3742f2fad08SBen Gardon  */
3752f2fad08SBen Gardon static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
3769a77daacSBen Gardon 				  u64 old_spte, u64 new_spte, int level,
3779a77daacSBen Gardon 				  bool shared)
3782f2fad08SBen Gardon {
3792f2fad08SBen Gardon 	bool was_present = is_shadow_present_pte(old_spte);
3802f2fad08SBen Gardon 	bool is_present = is_shadow_present_pte(new_spte);
3812f2fad08SBen Gardon 	bool was_leaf = was_present && is_last_spte(old_spte, level);
3822f2fad08SBen Gardon 	bool is_leaf = is_present && is_last_spte(new_spte, level);
3832f2fad08SBen Gardon 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
3842f2fad08SBen Gardon 
3852f2fad08SBen Gardon 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
3862f2fad08SBen Gardon 	WARN_ON(level < PG_LEVEL_4K);
387764388ceSSean Christopherson 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
3882f2fad08SBen Gardon 
3892f2fad08SBen Gardon 	/*
3902f2fad08SBen Gardon 	 * If this warning were to trigger it would indicate that there was a
3912f2fad08SBen Gardon 	 * missing MMU notifier or a race with some notifier handler.
3922f2fad08SBen Gardon 	 * A present, leaf SPTE should never be directly replaced with another
3932f2fad08SBen Gardon 	 * present leaf SPTE pointing to a differnt PFN. A notifier handler
3942f2fad08SBen Gardon 	 * should be zapping the SPTE before the main MM's page table is
3952f2fad08SBen Gardon 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
3962f2fad08SBen Gardon 	 * thread before replacement.
3972f2fad08SBen Gardon 	 */
3982f2fad08SBen Gardon 	if (was_leaf && is_leaf && pfn_changed) {
3992f2fad08SBen Gardon 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
4002f2fad08SBen Gardon 		       "SPTE with another present leaf SPTE mapping a\n"
4012f2fad08SBen Gardon 		       "different PFN!\n"
4022f2fad08SBen Gardon 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4032f2fad08SBen Gardon 		       as_id, gfn, old_spte, new_spte, level);
4042f2fad08SBen Gardon 
4052f2fad08SBen Gardon 		/*
4062f2fad08SBen Gardon 		 * Crash the host to prevent error propagation and guest data
4072f2fad08SBen Gardon 		 * courruption.
4082f2fad08SBen Gardon 		 */
4092f2fad08SBen Gardon 		BUG();
4102f2fad08SBen Gardon 	}
4112f2fad08SBen Gardon 
4122f2fad08SBen Gardon 	if (old_spte == new_spte)
4132f2fad08SBen Gardon 		return;
4142f2fad08SBen Gardon 
415b9a98c34SBen Gardon 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
416b9a98c34SBen Gardon 
4172f2fad08SBen Gardon 	/*
4182f2fad08SBen Gardon 	 * The only times a SPTE should be changed from a non-present to
4192f2fad08SBen Gardon 	 * non-present state is when an MMIO entry is installed/modified/
4202f2fad08SBen Gardon 	 * removed. In that case, there is nothing to do here.
4212f2fad08SBen Gardon 	 */
4222f2fad08SBen Gardon 	if (!was_present && !is_present) {
4232f2fad08SBen Gardon 		/*
42408f07c80SBen Gardon 		 * If this change does not involve a MMIO SPTE or removed SPTE,
42508f07c80SBen Gardon 		 * it is unexpected. Log the change, though it should not
42608f07c80SBen Gardon 		 * impact the guest since both the former and current SPTEs
42708f07c80SBen Gardon 		 * are nonpresent.
4282f2fad08SBen Gardon 		 */
42908f07c80SBen Gardon 		if (WARN_ON(!is_mmio_spte(old_spte) &&
43008f07c80SBen Gardon 			    !is_mmio_spte(new_spte) &&
43108f07c80SBen Gardon 			    !is_removed_spte(new_spte)))
4322f2fad08SBen Gardon 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
4332f2fad08SBen Gardon 			       "should not be replaced with another,\n"
4342f2fad08SBen Gardon 			       "different nonpresent SPTE, unless one or both\n"
43508f07c80SBen Gardon 			       "are MMIO SPTEs, or the new SPTE is\n"
43608f07c80SBen Gardon 			       "a temporary removed SPTE.\n"
4372f2fad08SBen Gardon 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
4382f2fad08SBen Gardon 			       as_id, gfn, old_spte, new_spte, level);
4392f2fad08SBen Gardon 		return;
4402f2fad08SBen Gardon 	}
4412f2fad08SBen Gardon 
4422f2fad08SBen Gardon 
4432f2fad08SBen Gardon 	if (was_leaf && is_dirty_spte(old_spte) &&
44464bb2769SSean Christopherson 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
4452f2fad08SBen Gardon 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
4462f2fad08SBen Gardon 
4472f2fad08SBen Gardon 	/*
4482f2fad08SBen Gardon 	 * Recursively handle child PTs if the change removed a subtree from
4492f2fad08SBen Gardon 	 * the paging structure.
4502f2fad08SBen Gardon 	 */
451a066e61fSBen Gardon 	if (was_present && !was_leaf && (pfn_changed || !is_present))
452a066e61fSBen Gardon 		handle_removed_tdp_mmu_page(kvm,
4539a77daacSBen Gardon 				spte_to_child_pt(old_spte, level), shared);
4542f2fad08SBen Gardon }
4552f2fad08SBen Gardon 
4562f2fad08SBen Gardon static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
4579a77daacSBen Gardon 				u64 old_spte, u64 new_spte, int level,
4589a77daacSBen Gardon 				bool shared)
4592f2fad08SBen Gardon {
4609a77daacSBen Gardon 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
4619a77daacSBen Gardon 			      shared);
462f8e14497SBen Gardon 	handle_changed_spte_acc_track(old_spte, new_spte, level);
463a6a0b05dSBen Gardon 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
464a6a0b05dSBen Gardon 				      new_spte, level);
4652f2fad08SBen Gardon }
466faaf05b0SBen Gardon 
467fe43fa2fSBen Gardon /*
4689a77daacSBen Gardon  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
4699a77daacSBen Gardon  * associated bookkeeping
4709a77daacSBen Gardon  *
4719a77daacSBen Gardon  * @kvm: kvm instance
4729a77daacSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
4739a77daacSBen Gardon  * @new_spte: The value the SPTE should be set to
4749a77daacSBen Gardon  * Returns: true if the SPTE was set, false if it was not. If false is returned,
4759a77daacSBen Gardon  *	    this function will have no side-effects.
4769a77daacSBen Gardon  */
4779a77daacSBen Gardon static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
4789a77daacSBen Gardon 					   struct tdp_iter *iter,
4799a77daacSBen Gardon 					   u64 new_spte)
4809a77daacSBen Gardon {
4819a77daacSBen Gardon 	u64 *root_pt = tdp_iter_root_pt(iter);
4829a77daacSBen Gardon 	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
4839a77daacSBen Gardon 	int as_id = kvm_mmu_page_as_id(root);
4849a77daacSBen Gardon 
4859a77daacSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
4869a77daacSBen Gardon 
48708f07c80SBen Gardon 	/*
48808f07c80SBen Gardon 	 * Do not change removed SPTEs. Only the thread that froze the SPTE
48908f07c80SBen Gardon 	 * may modify it.
49008f07c80SBen Gardon 	 */
49108f07c80SBen Gardon 	if (iter->old_spte == REMOVED_SPTE)
49208f07c80SBen Gardon 		return false;
49308f07c80SBen Gardon 
4949a77daacSBen Gardon 	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
4959a77daacSBen Gardon 		      new_spte) != iter->old_spte)
4969a77daacSBen Gardon 		return false;
4979a77daacSBen Gardon 
4989a77daacSBen Gardon 	handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
4999a77daacSBen Gardon 			    iter->level, true);
5009a77daacSBen Gardon 
5019a77daacSBen Gardon 	return true;
5029a77daacSBen Gardon }
5039a77daacSBen Gardon 
50408f07c80SBen Gardon static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
50508f07c80SBen Gardon 					   struct tdp_iter *iter)
50608f07c80SBen Gardon {
50708f07c80SBen Gardon 	/*
50808f07c80SBen Gardon 	 * Freeze the SPTE by setting it to a special,
50908f07c80SBen Gardon 	 * non-present value. This will stop other threads from
51008f07c80SBen Gardon 	 * immediately installing a present entry in its place
51108f07c80SBen Gardon 	 * before the TLBs are flushed.
51208f07c80SBen Gardon 	 */
51308f07c80SBen Gardon 	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
51408f07c80SBen Gardon 		return false;
51508f07c80SBen Gardon 
51608f07c80SBen Gardon 	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
51708f07c80SBen Gardon 					   KVM_PAGES_PER_HPAGE(iter->level));
51808f07c80SBen Gardon 
51908f07c80SBen Gardon 	/*
52008f07c80SBen Gardon 	 * No other thread can overwrite the removed SPTE as they
52108f07c80SBen Gardon 	 * must either wait on the MMU lock or use
52208f07c80SBen Gardon 	 * tdp_mmu_set_spte_atomic which will not overrite the
52308f07c80SBen Gardon 	 * special removed SPTE value. No bookkeeping is needed
52408f07c80SBen Gardon 	 * here since the SPTE is going from non-present
52508f07c80SBen Gardon 	 * to non-present.
52608f07c80SBen Gardon 	 */
52708f07c80SBen Gardon 	WRITE_ONCE(*iter->sptep, 0);
52808f07c80SBen Gardon 
52908f07c80SBen Gardon 	return true;
53008f07c80SBen Gardon }
53108f07c80SBen Gardon 
5329a77daacSBen Gardon 
5339a77daacSBen Gardon /*
534fe43fa2fSBen Gardon  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
535fe43fa2fSBen Gardon  * @kvm: kvm instance
536fe43fa2fSBen Gardon  * @iter: a tdp_iter instance currently on the SPTE that should be set
537fe43fa2fSBen Gardon  * @new_spte: The value the SPTE should be set to
538fe43fa2fSBen Gardon  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
539fe43fa2fSBen Gardon  *		      of the page. Should be set unless handling an MMU
540fe43fa2fSBen Gardon  *		      notifier for access tracking. Leaving record_acc_track
541fe43fa2fSBen Gardon  *		      unset in that case prevents page accesses from being
542fe43fa2fSBen Gardon  *		      double counted.
543fe43fa2fSBen Gardon  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
544fe43fa2fSBen Gardon  *		      appropriate for the change being made. Should be set
545fe43fa2fSBen Gardon  *		      unless performing certain dirty logging operations.
546fe43fa2fSBen Gardon  *		      Leaving record_dirty_log unset in that case prevents page
547fe43fa2fSBen Gardon  *		      writes from being double counted.
548fe43fa2fSBen Gardon  */
549f8e14497SBen Gardon static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
550a6a0b05dSBen Gardon 				      u64 new_spte, bool record_acc_track,
551a6a0b05dSBen Gardon 				      bool record_dirty_log)
552faaf05b0SBen Gardon {
5537cca2d0bSBen Gardon 	tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
554faaf05b0SBen Gardon 	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
555faaf05b0SBen Gardon 	int as_id = kvm_mmu_page_as_id(root);
556faaf05b0SBen Gardon 
557531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
5583a9a4aa5SBen Gardon 
55908f07c80SBen Gardon 	/*
56008f07c80SBen Gardon 	 * No thread should be using this function to set SPTEs to the
56108f07c80SBen Gardon 	 * temporary removed SPTE value.
56208f07c80SBen Gardon 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
56308f07c80SBen Gardon 	 * should be used. If operating under the MMU lock in write mode, the
56408f07c80SBen Gardon 	 * use of the removed SPTE should not be necessary.
56508f07c80SBen Gardon 	 */
56608f07c80SBen Gardon 	WARN_ON(iter->old_spte == REMOVED_SPTE);
56708f07c80SBen Gardon 
5687cca2d0bSBen Gardon 	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
569faaf05b0SBen Gardon 
570f8e14497SBen Gardon 	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
5719a77daacSBen Gardon 			      iter->level, false);
572f8e14497SBen Gardon 	if (record_acc_track)
573f8e14497SBen Gardon 		handle_changed_spte_acc_track(iter->old_spte, new_spte,
574f8e14497SBen Gardon 					      iter->level);
575a6a0b05dSBen Gardon 	if (record_dirty_log)
576a6a0b05dSBen Gardon 		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
577a6a0b05dSBen Gardon 					      iter->old_spte, new_spte,
578a6a0b05dSBen Gardon 					      iter->level);
579f8e14497SBen Gardon }
580f8e14497SBen Gardon 
581f8e14497SBen Gardon static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
582f8e14497SBen Gardon 				    u64 new_spte)
583f8e14497SBen Gardon {
584a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
585f8e14497SBen Gardon }
586f8e14497SBen Gardon 
587f8e14497SBen Gardon static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
588f8e14497SBen Gardon 						 struct tdp_iter *iter,
589f8e14497SBen Gardon 						 u64 new_spte)
590f8e14497SBen Gardon {
591a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
592a6a0b05dSBen Gardon }
593a6a0b05dSBen Gardon 
594a6a0b05dSBen Gardon static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
595a6a0b05dSBen Gardon 						 struct tdp_iter *iter,
596a6a0b05dSBen Gardon 						 u64 new_spte)
597a6a0b05dSBen Gardon {
598a6a0b05dSBen Gardon 	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
599faaf05b0SBen Gardon }
600faaf05b0SBen Gardon 
601faaf05b0SBen Gardon #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
602faaf05b0SBen Gardon 	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
603faaf05b0SBen Gardon 
604f8e14497SBen Gardon #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
605f8e14497SBen Gardon 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
606f8e14497SBen Gardon 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
607f8e14497SBen Gardon 		    !is_last_spte(_iter.old_spte, _iter.level))		\
608f8e14497SBen Gardon 			continue;					\
609f8e14497SBen Gardon 		else
610f8e14497SBen Gardon 
611bb18842eSBen Gardon #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
612bb18842eSBen Gardon 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
613bb18842eSBen Gardon 			 _mmu->shadow_root_level, _start, _end)
614bb18842eSBen Gardon 
615faaf05b0SBen Gardon /*
616e28a436cSBen Gardon  * Yield if the MMU lock is contended or this thread needs to return control
617e28a436cSBen Gardon  * to the scheduler.
618e28a436cSBen Gardon  *
619e139a34eSBen Gardon  * If this function should yield and flush is set, it will perform a remote
620e139a34eSBen Gardon  * TLB flush before yielding.
621e139a34eSBen Gardon  *
622e28a436cSBen Gardon  * If this function yields, it will also reset the tdp_iter's walk over the
623ed5e484bSBen Gardon  * paging structure and the calling function should skip to the next
624ed5e484bSBen Gardon  * iteration to allow the iterator to continue its traversal from the
625ed5e484bSBen Gardon  * paging structure root.
626e28a436cSBen Gardon  *
627e28a436cSBen Gardon  * Return true if this function yielded and the iterator's traversal was reset.
628e28a436cSBen Gardon  * Return false if a yield was not needed.
629e28a436cSBen Gardon  */
630e139a34eSBen Gardon static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
631e139a34eSBen Gardon 					     struct tdp_iter *iter, bool flush)
632a6a0b05dSBen Gardon {
633ed5e484bSBen Gardon 	/* Ensure forward progress has been made before yielding. */
634ed5e484bSBen Gardon 	if (iter->next_last_level_gfn == iter->yielded_gfn)
635ed5e484bSBen Gardon 		return false;
636ed5e484bSBen Gardon 
637531810caSBen Gardon 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6387cca2d0bSBen Gardon 		rcu_read_unlock();
6397cca2d0bSBen Gardon 
640e139a34eSBen Gardon 		if (flush)
641e139a34eSBen Gardon 			kvm_flush_remote_tlbs(kvm);
642e139a34eSBen Gardon 
643531810caSBen Gardon 		cond_resched_rwlock_write(&kvm->mmu_lock);
6447cca2d0bSBen Gardon 		rcu_read_lock();
645ed5e484bSBen Gardon 
646ed5e484bSBen Gardon 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
647ed5e484bSBen Gardon 
648ed5e484bSBen Gardon 		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
649ed5e484bSBen Gardon 			       iter->root_level, iter->min_level,
650ed5e484bSBen Gardon 			       iter->next_last_level_gfn);
651ed5e484bSBen Gardon 
652e28a436cSBen Gardon 		return true;
653a6a0b05dSBen Gardon 	}
654e28a436cSBen Gardon 
655e28a436cSBen Gardon 	return false;
656a6a0b05dSBen Gardon }
657a6a0b05dSBen Gardon 
658faaf05b0SBen Gardon /*
659faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
660faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
661faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
662faaf05b0SBen Gardon  * MMU lock.
663063afacdSBen Gardon  * If can_yield is true, will release the MMU lock and reschedule if the
664063afacdSBen Gardon  * scheduler needs the CPU or there is contention on the MMU lock. If this
665063afacdSBen Gardon  * function cannot yield, it will not release the MMU lock or reschedule and
666063afacdSBen Gardon  * the caller must ensure it does not supply too large a GFN range, or the
667063afacdSBen Gardon  * operation can cause a soft lockup.
668faaf05b0SBen Gardon  */
669faaf05b0SBen Gardon static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
670063afacdSBen Gardon 			  gfn_t start, gfn_t end, bool can_yield)
671faaf05b0SBen Gardon {
672faaf05b0SBen Gardon 	struct tdp_iter iter;
673faaf05b0SBen Gardon 	bool flush_needed = false;
674faaf05b0SBen Gardon 
6757cca2d0bSBen Gardon 	rcu_read_lock();
6767cca2d0bSBen Gardon 
677faaf05b0SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
6781af4a960SBen Gardon 		if (can_yield &&
6791af4a960SBen Gardon 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
6801af4a960SBen Gardon 			flush_needed = false;
6811af4a960SBen Gardon 			continue;
6821af4a960SBen Gardon 		}
6831af4a960SBen Gardon 
684faaf05b0SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
685faaf05b0SBen Gardon 			continue;
686faaf05b0SBen Gardon 
687faaf05b0SBen Gardon 		/*
688faaf05b0SBen Gardon 		 * If this is a non-last-level SPTE that covers a larger range
689faaf05b0SBen Gardon 		 * than should be zapped, continue, and zap the mappings at a
690faaf05b0SBen Gardon 		 * lower level.
691faaf05b0SBen Gardon 		 */
692faaf05b0SBen Gardon 		if ((iter.gfn < start ||
693faaf05b0SBen Gardon 		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
694faaf05b0SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
695faaf05b0SBen Gardon 			continue;
696faaf05b0SBen Gardon 
697faaf05b0SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
6981af4a960SBen Gardon 		flush_needed = true;
699faaf05b0SBen Gardon 	}
7007cca2d0bSBen Gardon 
7017cca2d0bSBen Gardon 	rcu_read_unlock();
702faaf05b0SBen Gardon 	return flush_needed;
703faaf05b0SBen Gardon }
704faaf05b0SBen Gardon 
705faaf05b0SBen Gardon /*
706faaf05b0SBen Gardon  * Tears down the mappings for the range of gfns, [start, end), and frees the
707faaf05b0SBen Gardon  * non-root pages mapping GFNs strictly within that range. Returns true if
708faaf05b0SBen Gardon  * SPTEs have been cleared and a TLB flush is needed before releasing the
709faaf05b0SBen Gardon  * MMU lock.
710faaf05b0SBen Gardon  */
711faaf05b0SBen Gardon bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
712faaf05b0SBen Gardon {
713faaf05b0SBen Gardon 	struct kvm_mmu_page *root;
714faaf05b0SBen Gardon 	bool flush = false;
715faaf05b0SBen Gardon 
716a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root)
717063afacdSBen Gardon 		flush |= zap_gfn_range(kvm, root, start, end, true);
718faaf05b0SBen Gardon 
719faaf05b0SBen Gardon 	return flush;
720faaf05b0SBen Gardon }
721faaf05b0SBen Gardon 
722faaf05b0SBen Gardon void kvm_tdp_mmu_zap_all(struct kvm *kvm)
723faaf05b0SBen Gardon {
724339f5a7fSRick Edgecombe 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
725faaf05b0SBen Gardon 	bool flush;
726faaf05b0SBen Gardon 
727faaf05b0SBen Gardon 	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
728faaf05b0SBen Gardon 	if (flush)
729faaf05b0SBen Gardon 		kvm_flush_remote_tlbs(kvm);
730faaf05b0SBen Gardon }
731bb18842eSBen Gardon 
732bb18842eSBen Gardon /*
733bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
734bb18842eSBen Gardon  * (NPT/EPT violation/misconfiguration)
735bb18842eSBen Gardon  */
736bb18842eSBen Gardon static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
737bb18842eSBen Gardon 					  int map_writable,
738bb18842eSBen Gardon 					  struct tdp_iter *iter,
739bb18842eSBen Gardon 					  kvm_pfn_t pfn, bool prefault)
740bb18842eSBen Gardon {
741bb18842eSBen Gardon 	u64 new_spte;
742bb18842eSBen Gardon 	int ret = 0;
743bb18842eSBen Gardon 	int make_spte_ret = 0;
744bb18842eSBen Gardon 
7459a77daacSBen Gardon 	if (unlikely(is_noslot_pfn(pfn)))
746bb18842eSBen Gardon 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
7479a77daacSBen Gardon 	else
748bb18842eSBen Gardon 		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
749bb18842eSBen Gardon 					 pfn, iter->old_spte, prefault, true,
750bb18842eSBen Gardon 					 map_writable, !shadow_accessed_mask,
751bb18842eSBen Gardon 					 &new_spte);
752bb18842eSBen Gardon 
753bb18842eSBen Gardon 	if (new_spte == iter->old_spte)
754bb18842eSBen Gardon 		ret = RET_PF_SPURIOUS;
7559a77daacSBen Gardon 	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
7569a77daacSBen Gardon 		return RET_PF_RETRY;
757bb18842eSBen Gardon 
758bb18842eSBen Gardon 	/*
759bb18842eSBen Gardon 	 * If the page fault was caused by a write but the page is write
760bb18842eSBen Gardon 	 * protected, emulation is needed. If the emulation was skipped,
761bb18842eSBen Gardon 	 * the vCPU would have the same fault again.
762bb18842eSBen Gardon 	 */
763bb18842eSBen Gardon 	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
764bb18842eSBen Gardon 		if (write)
765bb18842eSBen Gardon 			ret = RET_PF_EMULATE;
766bb18842eSBen Gardon 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
767bb18842eSBen Gardon 	}
768bb18842eSBen Gardon 
769bb18842eSBen Gardon 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
7709a77daacSBen Gardon 	if (unlikely(is_mmio_spte(new_spte))) {
7719a77daacSBen Gardon 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
7729a77daacSBen Gardon 				     new_spte);
773bb18842eSBen Gardon 		ret = RET_PF_EMULATE;
774*3849e092SSean Christopherson 	} else {
7759a77daacSBen Gardon 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
7769a77daacSBen Gardon 				       rcu_dereference(iter->sptep));
777*3849e092SSean Christopherson 	}
778bb18842eSBen Gardon 
779bb18842eSBen Gardon 	if (!prefault)
780bb18842eSBen Gardon 		vcpu->stat.pf_fixed++;
781bb18842eSBen Gardon 
782bb18842eSBen Gardon 	return ret;
783bb18842eSBen Gardon }
784bb18842eSBen Gardon 
785bb18842eSBen Gardon /*
786bb18842eSBen Gardon  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
787bb18842eSBen Gardon  * page tables and SPTEs to translate the faulting guest physical address.
788bb18842eSBen Gardon  */
789bb18842eSBen Gardon int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
790bb18842eSBen Gardon 		    int map_writable, int max_level, kvm_pfn_t pfn,
791bb18842eSBen Gardon 		    bool prefault)
792bb18842eSBen Gardon {
793bb18842eSBen Gardon 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
794bb18842eSBen Gardon 	bool write = error_code & PFERR_WRITE_MASK;
795bb18842eSBen Gardon 	bool exec = error_code & PFERR_FETCH_MASK;
796bb18842eSBen Gardon 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
797bb18842eSBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
798bb18842eSBen Gardon 	struct tdp_iter iter;
79989c0fd49SBen Gardon 	struct kvm_mmu_page *sp;
800bb18842eSBen Gardon 	u64 *child_pt;
801bb18842eSBen Gardon 	u64 new_spte;
802bb18842eSBen Gardon 	int ret;
803bb18842eSBen Gardon 	gfn_t gfn = gpa >> PAGE_SHIFT;
804bb18842eSBen Gardon 	int level;
805bb18842eSBen Gardon 	int req_level;
806bb18842eSBen Gardon 
807bb18842eSBen Gardon 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
808bb18842eSBen Gardon 		return RET_PF_RETRY;
809bb18842eSBen Gardon 	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
810bb18842eSBen Gardon 		return RET_PF_RETRY;
811bb18842eSBen Gardon 
812bb18842eSBen Gardon 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
813bb18842eSBen Gardon 					huge_page_disallowed, &req_level);
814bb18842eSBen Gardon 
815bb18842eSBen Gardon 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
8167cca2d0bSBen Gardon 
8177cca2d0bSBen Gardon 	rcu_read_lock();
8187cca2d0bSBen Gardon 
819bb18842eSBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
820bb18842eSBen Gardon 		if (nx_huge_page_workaround_enabled)
821bb18842eSBen Gardon 			disallowed_hugepage_adjust(iter.old_spte, gfn,
822bb18842eSBen Gardon 						   iter.level, &pfn, &level);
823bb18842eSBen Gardon 
824bb18842eSBen Gardon 		if (iter.level == level)
825bb18842eSBen Gardon 			break;
826bb18842eSBen Gardon 
827bb18842eSBen Gardon 		/*
828bb18842eSBen Gardon 		 * If there is an SPTE mapping a large page at a higher level
829bb18842eSBen Gardon 		 * than the target, that SPTE must be cleared and replaced
830bb18842eSBen Gardon 		 * with a non-leaf SPTE.
831bb18842eSBen Gardon 		 */
832bb18842eSBen Gardon 		if (is_shadow_present_pte(iter.old_spte) &&
833bb18842eSBen Gardon 		    is_large_pte(iter.old_spte)) {
83408f07c80SBen Gardon 			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
8359a77daacSBen Gardon 				break;
836bb18842eSBen Gardon 
837bb18842eSBen Gardon 			/*
838bb18842eSBen Gardon 			 * The iter must explicitly re-read the spte here
839bb18842eSBen Gardon 			 * because the new value informs the !present
840bb18842eSBen Gardon 			 * path below.
841bb18842eSBen Gardon 			 */
8427cca2d0bSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
843bb18842eSBen Gardon 		}
844bb18842eSBen Gardon 
845bb18842eSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte)) {
84689c0fd49SBen Gardon 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
84789c0fd49SBen Gardon 			child_pt = sp->spt;
848a9442f59SBen Gardon 
849bb18842eSBen Gardon 			new_spte = make_nonleaf_spte(child_pt,
850bb18842eSBen Gardon 						     !shadow_accessed_mask);
851bb18842eSBen Gardon 
8529a77daacSBen Gardon 			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
8539a77daacSBen Gardon 						    new_spte)) {
8549a77daacSBen Gardon 				tdp_mmu_link_page(vcpu->kvm, sp, true,
8559a77daacSBen Gardon 						  huge_page_disallowed &&
8569a77daacSBen Gardon 						  req_level >= iter.level);
8579a77daacSBen Gardon 
858bb18842eSBen Gardon 				trace_kvm_mmu_get_page(sp, true);
8599a77daacSBen Gardon 			} else {
8609a77daacSBen Gardon 				tdp_mmu_free_sp(sp);
8619a77daacSBen Gardon 				break;
8629a77daacSBen Gardon 			}
863bb18842eSBen Gardon 		}
864bb18842eSBen Gardon 	}
865bb18842eSBen Gardon 
8669a77daacSBen Gardon 	if (iter.level != level) {
8677cca2d0bSBen Gardon 		rcu_read_unlock();
868bb18842eSBen Gardon 		return RET_PF_RETRY;
8697cca2d0bSBen Gardon 	}
870bb18842eSBen Gardon 
871bb18842eSBen Gardon 	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
872bb18842eSBen Gardon 					      pfn, prefault);
8737cca2d0bSBen Gardon 	rcu_read_unlock();
874bb18842eSBen Gardon 
875bb18842eSBen Gardon 	return ret;
876bb18842eSBen Gardon }
877063afacdSBen Gardon 
878c1b91493SSean Christopherson typedef int (*tdp_handler_t)(struct kvm *kvm, struct kvm_memory_slot *slot,
879c1b91493SSean Christopherson 			     struct kvm_mmu_page *root, gfn_t start, gfn_t end,
880c1b91493SSean Christopherson 			     unsigned long data);
881c1b91493SSean Christopherson 
882c1b91493SSean Christopherson static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
8838f5c44f9SMaciej S. Szmigiero 							unsigned long start,
8848f5c44f9SMaciej S. Szmigiero 							unsigned long end,
8858f5c44f9SMaciej S. Szmigiero 							unsigned long data,
886c1b91493SSean Christopherson 							tdp_handler_t handler)
887063afacdSBen Gardon {
888063afacdSBen Gardon 	struct kvm_memslots *slots;
889063afacdSBen Gardon 	struct kvm_memory_slot *memslot;
890063afacdSBen Gardon 	struct kvm_mmu_page *root;
891063afacdSBen Gardon 	int ret = 0;
892063afacdSBen Gardon 	int as_id;
893063afacdSBen Gardon 
894a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
895063afacdSBen Gardon 		as_id = kvm_mmu_page_as_id(root);
896063afacdSBen Gardon 		slots = __kvm_memslots(kvm, as_id);
897063afacdSBen Gardon 		kvm_for_each_memslot(memslot, slots) {
898063afacdSBen Gardon 			unsigned long hva_start, hva_end;
899063afacdSBen Gardon 			gfn_t gfn_start, gfn_end;
900063afacdSBen Gardon 
901063afacdSBen Gardon 			hva_start = max(start, memslot->userspace_addr);
902063afacdSBen Gardon 			hva_end = min(end, memslot->userspace_addr +
903063afacdSBen Gardon 				      (memslot->npages << PAGE_SHIFT));
904063afacdSBen Gardon 			if (hva_start >= hva_end)
905063afacdSBen Gardon 				continue;
906063afacdSBen Gardon 			/*
907063afacdSBen Gardon 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
908063afacdSBen Gardon 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
909063afacdSBen Gardon 			 */
910063afacdSBen Gardon 			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
911063afacdSBen Gardon 			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
912063afacdSBen Gardon 
913063afacdSBen Gardon 			ret |= handler(kvm, memslot, root, gfn_start,
914063afacdSBen Gardon 				       gfn_end, data);
915063afacdSBen Gardon 		}
916063afacdSBen Gardon 	}
917063afacdSBen Gardon 
918063afacdSBen Gardon 	return ret;
919063afacdSBen Gardon }
920063afacdSBen Gardon 
92120321957SSean Christopherson static __always_inline int kvm_tdp_mmu_handle_hva(struct kvm *kvm,
92220321957SSean Christopherson 						  unsigned long addr,
92320321957SSean Christopherson 						  unsigned long data,
92420321957SSean Christopherson 						  tdp_handler_t handler)
92520321957SSean Christopherson {
92620321957SSean Christopherson 	return kvm_tdp_mmu_handle_hva_range(kvm, addr, addr + 1, data, handler);
92720321957SSean Christopherson }
92820321957SSean Christopherson 
929063afacdSBen Gardon static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
930063afacdSBen Gardon 				     struct kvm_memory_slot *slot,
931063afacdSBen Gardon 				     struct kvm_mmu_page *root, gfn_t start,
932063afacdSBen Gardon 				     gfn_t end, unsigned long unused)
933063afacdSBen Gardon {
934063afacdSBen Gardon 	return zap_gfn_range(kvm, root, start, end, false);
935063afacdSBen Gardon }
936063afacdSBen Gardon 
937063afacdSBen Gardon int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
938063afacdSBen Gardon 			      unsigned long end)
939063afacdSBen Gardon {
940063afacdSBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
941063afacdSBen Gardon 					    zap_gfn_range_hva_wrapper);
942063afacdSBen Gardon }
943f8e14497SBen Gardon 
944f8e14497SBen Gardon /*
945f8e14497SBen Gardon  * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
946f8e14497SBen Gardon  * if any of the GFNs in the range have been accessed.
947f8e14497SBen Gardon  */
948f8e14497SBen Gardon static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
949f8e14497SBen Gardon 			 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
950f8e14497SBen Gardon 			 unsigned long unused)
951f8e14497SBen Gardon {
952f8e14497SBen Gardon 	struct tdp_iter iter;
953f8e14497SBen Gardon 	int young = 0;
954f8e14497SBen Gardon 	u64 new_spte = 0;
955f8e14497SBen Gardon 
9567cca2d0bSBen Gardon 	rcu_read_lock();
9577cca2d0bSBen Gardon 
958f8e14497SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
959f8e14497SBen Gardon 		/*
960f8e14497SBen Gardon 		 * If we have a non-accessed entry we don't need to change the
961f8e14497SBen Gardon 		 * pte.
962f8e14497SBen Gardon 		 */
963f8e14497SBen Gardon 		if (!is_accessed_spte(iter.old_spte))
964f8e14497SBen Gardon 			continue;
965f8e14497SBen Gardon 
966f8e14497SBen Gardon 		new_spte = iter.old_spte;
967f8e14497SBen Gardon 
968f8e14497SBen Gardon 		if (spte_ad_enabled(new_spte)) {
969f8e14497SBen Gardon 			clear_bit((ffs(shadow_accessed_mask) - 1),
970f8e14497SBen Gardon 				  (unsigned long *)&new_spte);
971f8e14497SBen Gardon 		} else {
972f8e14497SBen Gardon 			/*
973f8e14497SBen Gardon 			 * Capture the dirty status of the page, so that it doesn't get
974f8e14497SBen Gardon 			 * lost when the SPTE is marked for access tracking.
975f8e14497SBen Gardon 			 */
976f8e14497SBen Gardon 			if (is_writable_pte(new_spte))
977f8e14497SBen Gardon 				kvm_set_pfn_dirty(spte_to_pfn(new_spte));
978f8e14497SBen Gardon 
979f8e14497SBen Gardon 			new_spte = mark_spte_for_access_track(new_spte);
980f8e14497SBen Gardon 		}
981a6a0b05dSBen Gardon 		new_spte &= ~shadow_dirty_mask;
982f8e14497SBen Gardon 
983f8e14497SBen Gardon 		tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
984f8e14497SBen Gardon 		young = 1;
98533dd3574SBen Gardon 
98633dd3574SBen Gardon 		trace_kvm_age_page(iter.gfn, iter.level, slot, young);
987f8e14497SBen Gardon 	}
988f8e14497SBen Gardon 
9897cca2d0bSBen Gardon 	rcu_read_unlock();
9907cca2d0bSBen Gardon 
991f8e14497SBen Gardon 	return young;
992f8e14497SBen Gardon }
993f8e14497SBen Gardon 
994f8e14497SBen Gardon int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
995f8e14497SBen Gardon 			      unsigned long end)
996f8e14497SBen Gardon {
997f8e14497SBen Gardon 	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
998f8e14497SBen Gardon 					    age_gfn_range);
999f8e14497SBen Gardon }
1000f8e14497SBen Gardon 
1001f8e14497SBen Gardon static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
1002e12b785eSSean Christopherson 			struct kvm_mmu_page *root, gfn_t gfn, gfn_t end,
1003e12b785eSSean Christopherson 			unsigned long unused)
1004f8e14497SBen Gardon {
1005f8e14497SBen Gardon 	struct tdp_iter iter;
1006f8e14497SBen Gardon 
1007e12b785eSSean Christopherson 	tdp_root_for_each_leaf_pte(iter, root, gfn, end)
1008f8e14497SBen Gardon 		if (is_accessed_spte(iter.old_spte))
1009f8e14497SBen Gardon 			return 1;
1010f8e14497SBen Gardon 
1011f8e14497SBen Gardon 	return 0;
1012f8e14497SBen Gardon }
1013f8e14497SBen Gardon 
1014f8e14497SBen Gardon int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
1015f8e14497SBen Gardon {
101620321957SSean Christopherson 	return kvm_tdp_mmu_handle_hva(kvm, hva, 0, test_age_gfn);
1017f8e14497SBen Gardon }
10181d8dd6b3SBen Gardon 
10191d8dd6b3SBen Gardon /*
10201d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
10211d8dd6b3SBen Gardon  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
10221d8dd6b3SBen Gardon  * notifier.
10231d8dd6b3SBen Gardon  * Returns non-zero if a flush is needed before releasing the MMU lock.
10241d8dd6b3SBen Gardon  */
10251d8dd6b3SBen Gardon static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
102674fe0f54SSean Christopherson 			struct kvm_mmu_page *root, gfn_t gfn, gfn_t end,
10271d8dd6b3SBen Gardon 			unsigned long data)
10281d8dd6b3SBen Gardon {
10291d8dd6b3SBen Gardon 	struct tdp_iter iter;
10301d8dd6b3SBen Gardon 	pte_t *ptep = (pte_t *)data;
10311d8dd6b3SBen Gardon 	kvm_pfn_t new_pfn;
10321d8dd6b3SBen Gardon 	u64 new_spte;
10331d8dd6b3SBen Gardon 	int need_flush = 0;
10341d8dd6b3SBen Gardon 
10357cca2d0bSBen Gardon 	rcu_read_lock();
10367cca2d0bSBen Gardon 
103774fe0f54SSean Christopherson 	WARN_ON(pte_huge(*ptep) || (gfn + 1) != end);
10381d8dd6b3SBen Gardon 
10391d8dd6b3SBen Gardon 	new_pfn = pte_pfn(*ptep);
10401d8dd6b3SBen Gardon 
10411d8dd6b3SBen Gardon 	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
10421d8dd6b3SBen Gardon 		if (iter.level != PG_LEVEL_4K)
10431d8dd6b3SBen Gardon 			continue;
10441d8dd6b3SBen Gardon 
10451d8dd6b3SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte))
10461d8dd6b3SBen Gardon 			break;
10471d8dd6b3SBen Gardon 
1048f055ab63SSean Christopherson 		/*
1049f055ab63SSean Christopherson 		 * Note, when changing a read-only SPTE, it's not strictly
1050f055ab63SSean Christopherson 		 * necessary to zero the SPTE before setting the new PFN, but
1051f055ab63SSean Christopherson 		 * doing so preserves the invariant that the PFN of a present
1052f055ab63SSean Christopherson 		 * leaf SPTE can never change.  See __handle_changed_spte().
1053f055ab63SSean Christopherson 		 */
10541d8dd6b3SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
10551d8dd6b3SBen Gardon 
10561d8dd6b3SBen Gardon 		if (!pte_write(*ptep)) {
10571d8dd6b3SBen Gardon 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
10581d8dd6b3SBen Gardon 					iter.old_spte, new_pfn);
10591d8dd6b3SBen Gardon 
10601d8dd6b3SBen Gardon 			tdp_mmu_set_spte(kvm, &iter, new_spte);
10611d8dd6b3SBen Gardon 		}
10621d8dd6b3SBen Gardon 
10631d8dd6b3SBen Gardon 		need_flush = 1;
10641d8dd6b3SBen Gardon 	}
10651d8dd6b3SBen Gardon 
10661d8dd6b3SBen Gardon 	if (need_flush)
10671d8dd6b3SBen Gardon 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
10681d8dd6b3SBen Gardon 
10697cca2d0bSBen Gardon 	rcu_read_unlock();
10707cca2d0bSBen Gardon 
10711d8dd6b3SBen Gardon 	return 0;
10721d8dd6b3SBen Gardon }
10731d8dd6b3SBen Gardon 
10741d8dd6b3SBen Gardon int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
10751d8dd6b3SBen Gardon 			     pte_t *host_ptep)
10761d8dd6b3SBen Gardon {
107720321957SSean Christopherson 	return kvm_tdp_mmu_handle_hva(kvm, address, (unsigned long)host_ptep,
10781d8dd6b3SBen Gardon 				      set_tdp_spte);
10791d8dd6b3SBen Gardon }
10801d8dd6b3SBen Gardon 
1081a6a0b05dSBen Gardon /*
1082a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs [start, end). If
1083a6a0b05dSBen Gardon  * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
1084a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1085a6a0b05dSBen Gardon  */
1086a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1087a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1088a6a0b05dSBen Gardon {
1089a6a0b05dSBen Gardon 	struct tdp_iter iter;
1090a6a0b05dSBen Gardon 	u64 new_spte;
1091a6a0b05dSBen Gardon 	bool spte_set = false;
1092a6a0b05dSBen Gardon 
10937cca2d0bSBen Gardon 	rcu_read_lock();
10947cca2d0bSBen Gardon 
1095a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1096a6a0b05dSBen Gardon 
1097a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1098a6a0b05dSBen Gardon 				   min_level, start, end) {
10991af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
11001af4a960SBen Gardon 			continue;
11011af4a960SBen Gardon 
1102a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
11030f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
11040f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1105a6a0b05dSBen Gardon 			continue;
1106a6a0b05dSBen Gardon 
1107a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1108a6a0b05dSBen Gardon 
1109a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1110a6a0b05dSBen Gardon 		spte_set = true;
1111a6a0b05dSBen Gardon 	}
11127cca2d0bSBen Gardon 
11137cca2d0bSBen Gardon 	rcu_read_unlock();
1114a6a0b05dSBen Gardon 	return spte_set;
1115a6a0b05dSBen Gardon }
1116a6a0b05dSBen Gardon 
1117a6a0b05dSBen Gardon /*
1118a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1119a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1120a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1121a6a0b05dSBen Gardon  */
1122a6a0b05dSBen Gardon bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
1123a6a0b05dSBen Gardon 			     int min_level)
1124a6a0b05dSBen Gardon {
1125a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1126a6a0b05dSBen Gardon 	int root_as_id;
1127a6a0b05dSBen Gardon 	bool spte_set = false;
1128a6a0b05dSBen Gardon 
1129a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
1130a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1131a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1132a6a0b05dSBen Gardon 			continue;
1133a6a0b05dSBen Gardon 
1134a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1135a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1136a6a0b05dSBen Gardon 	}
1137a6a0b05dSBen Gardon 
1138a6a0b05dSBen Gardon 	return spte_set;
1139a6a0b05dSBen Gardon }
1140a6a0b05dSBen Gardon 
1141a6a0b05dSBen Gardon /*
1142a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1143a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1144a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1145a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1146a6a0b05dSBen Gardon  * be flushed.
1147a6a0b05dSBen Gardon  */
1148a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1149a6a0b05dSBen Gardon 			   gfn_t start, gfn_t end)
1150a6a0b05dSBen Gardon {
1151a6a0b05dSBen Gardon 	struct tdp_iter iter;
1152a6a0b05dSBen Gardon 	u64 new_spte;
1153a6a0b05dSBen Gardon 	bool spte_set = false;
1154a6a0b05dSBen Gardon 
11557cca2d0bSBen Gardon 	rcu_read_lock();
11567cca2d0bSBen Gardon 
1157a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
11581af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
11591af4a960SBen Gardon 			continue;
11601af4a960SBen Gardon 
1161a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1162a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1163a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1164a6a0b05dSBen Gardon 			else
1165a6a0b05dSBen Gardon 				continue;
1166a6a0b05dSBen Gardon 		} else {
1167a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1168a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1169a6a0b05dSBen Gardon 			else
1170a6a0b05dSBen Gardon 				continue;
1171a6a0b05dSBen Gardon 		}
1172a6a0b05dSBen Gardon 
1173a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1174a6a0b05dSBen Gardon 		spte_set = true;
1175a6a0b05dSBen Gardon 	}
11767cca2d0bSBen Gardon 
11777cca2d0bSBen Gardon 	rcu_read_unlock();
1178a6a0b05dSBen Gardon 	return spte_set;
1179a6a0b05dSBen Gardon }
1180a6a0b05dSBen Gardon 
1181a6a0b05dSBen Gardon /*
1182a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1183a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1184a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1185a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1186a6a0b05dSBen Gardon  * be flushed.
1187a6a0b05dSBen Gardon  */
1188a6a0b05dSBen Gardon bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
1189a6a0b05dSBen Gardon {
1190a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1191a6a0b05dSBen Gardon 	int root_as_id;
1192a6a0b05dSBen Gardon 	bool spte_set = false;
1193a6a0b05dSBen Gardon 
1194a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
1195a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1196a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1197a6a0b05dSBen Gardon 			continue;
1198a6a0b05dSBen Gardon 
1199a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1200a6a0b05dSBen Gardon 				slot->base_gfn + slot->npages);
1201a6a0b05dSBen Gardon 	}
1202a6a0b05dSBen Gardon 
1203a6a0b05dSBen Gardon 	return spte_set;
1204a6a0b05dSBen Gardon }
1205a6a0b05dSBen Gardon 
1206a6a0b05dSBen Gardon /*
1207a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1208a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1209a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1210a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1211a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1212a6a0b05dSBen Gardon  */
1213a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1214a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1215a6a0b05dSBen Gardon {
1216a6a0b05dSBen Gardon 	struct tdp_iter iter;
1217a6a0b05dSBen Gardon 	u64 new_spte;
1218a6a0b05dSBen Gardon 
12197cca2d0bSBen Gardon 	rcu_read_lock();
12207cca2d0bSBen Gardon 
1221a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1222a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1223a6a0b05dSBen Gardon 		if (!mask)
1224a6a0b05dSBen Gardon 			break;
1225a6a0b05dSBen Gardon 
1226a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1227a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1228a6a0b05dSBen Gardon 			continue;
1229a6a0b05dSBen Gardon 
1230f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1231f1b3b06aSBen Gardon 
1232a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1233a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1234a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1235a6a0b05dSBen Gardon 			else
1236a6a0b05dSBen Gardon 				continue;
1237a6a0b05dSBen Gardon 		} else {
1238a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1239a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1240a6a0b05dSBen Gardon 			else
1241a6a0b05dSBen Gardon 				continue;
1242a6a0b05dSBen Gardon 		}
1243a6a0b05dSBen Gardon 
1244a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1245a6a0b05dSBen Gardon 	}
12467cca2d0bSBen Gardon 
12477cca2d0bSBen Gardon 	rcu_read_unlock();
1248a6a0b05dSBen Gardon }
1249a6a0b05dSBen Gardon 
1250a6a0b05dSBen Gardon /*
1251a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1252a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1253a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1254a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1255a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1256a6a0b05dSBen Gardon  */
1257a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1258a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1259a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1260a6a0b05dSBen Gardon 				       bool wrprot)
1261a6a0b05dSBen Gardon {
1262a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1263a6a0b05dSBen Gardon 	int root_as_id;
1264a6a0b05dSBen Gardon 
1265531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1266a6a0b05dSBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
1267a6a0b05dSBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
1268a6a0b05dSBen Gardon 		if (root_as_id != slot->as_id)
1269a6a0b05dSBen Gardon 			continue;
1270a6a0b05dSBen Gardon 
1271a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1272a6a0b05dSBen Gardon 	}
1273a6a0b05dSBen Gardon }
1274a6a0b05dSBen Gardon 
1275a6a0b05dSBen Gardon /*
127687aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
127787aa9ec9SBen Gardon  * GFNs within the slot.
127814881998SBen Gardon  */
127914881998SBen Gardon static void zap_collapsible_spte_range(struct kvm *kvm,
128014881998SBen Gardon 				       struct kvm_mmu_page *root,
12819eba50f8SSean Christopherson 				       struct kvm_memory_slot *slot)
128214881998SBen Gardon {
12839eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
12849eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
128514881998SBen Gardon 	struct tdp_iter iter;
128614881998SBen Gardon 	kvm_pfn_t pfn;
128714881998SBen Gardon 	bool spte_set = false;
128814881998SBen Gardon 
12897cca2d0bSBen Gardon 	rcu_read_lock();
12907cca2d0bSBen Gardon 
129114881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
12921af4a960SBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
12931af4a960SBen Gardon 			spte_set = false;
12941af4a960SBen Gardon 			continue;
12951af4a960SBen Gardon 		}
12961af4a960SBen Gardon 
129714881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
129887aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
129914881998SBen Gardon 			continue;
130014881998SBen Gardon 
130114881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
130214881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
13039eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
13049eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
130514881998SBen Gardon 			continue;
130614881998SBen Gardon 
130714881998SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, 0);
130814881998SBen Gardon 
13091af4a960SBen Gardon 		spte_set = true;
131014881998SBen Gardon 	}
131114881998SBen Gardon 
13127cca2d0bSBen Gardon 	rcu_read_unlock();
131314881998SBen Gardon 	if (spte_set)
131414881998SBen Gardon 		kvm_flush_remote_tlbs(kvm);
131514881998SBen Gardon }
131614881998SBen Gardon 
131714881998SBen Gardon /*
131814881998SBen Gardon  * Clear non-leaf entries (and free associated page tables) which could
131914881998SBen Gardon  * be replaced by large mappings, for GFNs within the slot.
132014881998SBen Gardon  */
132114881998SBen Gardon void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
13229eba50f8SSean Christopherson 				       struct kvm_memory_slot *slot)
132314881998SBen Gardon {
132414881998SBen Gardon 	struct kvm_mmu_page *root;
132514881998SBen Gardon 	int root_as_id;
132614881998SBen Gardon 
1327a889ea54SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
132814881998SBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
132914881998SBen Gardon 		if (root_as_id != slot->as_id)
133014881998SBen Gardon 			continue;
133114881998SBen Gardon 
13329eba50f8SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
133314881998SBen Gardon 	}
133414881998SBen Gardon }
133546044f72SBen Gardon 
133646044f72SBen Gardon /*
133746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
133846044f72SBen Gardon  * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
133946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
134046044f72SBen Gardon  */
134146044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
134246044f72SBen Gardon 			      gfn_t gfn)
134346044f72SBen Gardon {
134446044f72SBen Gardon 	struct tdp_iter iter;
134546044f72SBen Gardon 	u64 new_spte;
134646044f72SBen Gardon 	bool spte_set = false;
134746044f72SBen Gardon 
13487cca2d0bSBen Gardon 	rcu_read_lock();
13497cca2d0bSBen Gardon 
135046044f72SBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
135146044f72SBen Gardon 		if (!is_writable_pte(iter.old_spte))
135246044f72SBen Gardon 			break;
135346044f72SBen Gardon 
135446044f72SBen Gardon 		new_spte = iter.old_spte &
135546044f72SBen Gardon 			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
135646044f72SBen Gardon 
135746044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
135846044f72SBen Gardon 		spte_set = true;
135946044f72SBen Gardon 	}
136046044f72SBen Gardon 
13617cca2d0bSBen Gardon 	rcu_read_unlock();
13627cca2d0bSBen Gardon 
136346044f72SBen Gardon 	return spte_set;
136446044f72SBen Gardon }
136546044f72SBen Gardon 
136646044f72SBen Gardon /*
136746044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
136846044f72SBen Gardon  * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
136946044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
137046044f72SBen Gardon  */
137146044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
137246044f72SBen Gardon 				   struct kvm_memory_slot *slot, gfn_t gfn)
137346044f72SBen Gardon {
137446044f72SBen Gardon 	struct kvm_mmu_page *root;
137546044f72SBen Gardon 	int root_as_id;
137646044f72SBen Gardon 	bool spte_set = false;
137746044f72SBen Gardon 
1378531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
137946044f72SBen Gardon 	for_each_tdp_mmu_root(kvm, root) {
138046044f72SBen Gardon 		root_as_id = kvm_mmu_page_as_id(root);
138146044f72SBen Gardon 		if (root_as_id != slot->as_id)
138246044f72SBen Gardon 			continue;
138346044f72SBen Gardon 
138446044f72SBen Gardon 		spte_set |= write_protect_gfn(kvm, root, gfn);
138546044f72SBen Gardon 	}
138646044f72SBen Gardon 	return spte_set;
138746044f72SBen Gardon }
138846044f72SBen Gardon 
138995fb5b02SBen Gardon /*
139095fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
139195fb5b02SBen Gardon  * That SPTE may be non-present.
139295fb5b02SBen Gardon  */
139339b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
139439b4d43eSSean Christopherson 			 int *root_level)
139595fb5b02SBen Gardon {
139695fb5b02SBen Gardon 	struct tdp_iter iter;
139795fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
139895fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
13992aa07893SSean Christopherson 	int leaf = -1;
140095fb5b02SBen Gardon 
140139b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
140295fb5b02SBen Gardon 
14037cca2d0bSBen Gardon 	rcu_read_lock();
14047cca2d0bSBen Gardon 
140595fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
140695fb5b02SBen Gardon 		leaf = iter.level;
1407dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
140895fb5b02SBen Gardon 	}
140995fb5b02SBen Gardon 
14107cca2d0bSBen Gardon 	rcu_read_unlock();
14117cca2d0bSBen Gardon 
141295fb5b02SBen Gardon 	return leaf;
141395fb5b02SBen Gardon }
1414