xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 441a5dfc)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include "mmu.h"
5 #include "mmu_internal.h"
6 #include "mmutrace.h"
7 #include "tdp_iter.h"
8 #include "tdp_mmu.h"
9 #include "spte.h"
10 
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
13 
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
16 {
17 	struct workqueue_struct *wq;
18 
19 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
20 	if (!wq)
21 		return -ENOMEM;
22 
23 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
24 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
25 	kvm->arch.tdp_mmu_zap_wq = wq;
26 	return 1;
27 }
28 
29 /* Arbitrarily returns true so that this may be used in if statements. */
30 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
31 							     bool shared)
32 {
33 	if (shared)
34 		lockdep_assert_held_read(&kvm->mmu_lock);
35 	else
36 		lockdep_assert_held_write(&kvm->mmu_lock);
37 
38 	return true;
39 }
40 
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42 {
43 	/*
44 	 * Invalidate all roots, which, besides the obvious, schedules all roots
45 	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
46 	 * ultimately frees all roots.
47 	 */
48 	kvm_tdp_mmu_invalidate_all_roots(kvm);
49 
50 	/*
51 	 * Destroying a workqueue also first flushes the workqueue, i.e. no
52 	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
53 	 */
54 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
55 
56 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
57 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
58 
59 	/*
60 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
61 	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
62 	 * can call kvm_tdp_mmu_put_root and create new callbacks.
63 	 */
64 	rcu_barrier();
65 }
66 
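/* Free a TDP MMU shadow page: its page table page and its header. */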
67 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
68 {
69 	free_page((unsigned long)sp->spt);
70 	kmem_cache_free(mmu_page_header_cache, sp);
71 }
72 
73 /*
74  * This is called through call_rcu in order to free TDP page table memory
75  * safely with respect to other kernel threads that may be operating on
76  * the memory.
77  * By only accessing TDP MMU page table memory in an RCU read critical
78  * section, and freeing it after a grace period, lockless access to that
79  * memory won't use it after it is freed.
80  */
81 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
82 {
83 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
84 					       rcu_head);
85 
86 	tdp_mmu_free_sp(sp);
87 }
88 
89 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
90 			     bool shared);
91 
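/*
 * Workqueue callback that zaps an invalidated root's SPTEs under mmu_lock
 * held for read, then drops the TDP MMU's reference to the root.
 */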
92 static void tdp_mmu_zap_root_work(struct work_struct *work)
93 {
94 	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
95 						 tdp_mmu_async_work);
96 	struct kvm *kvm = root->tdp_mmu_async_data;
97 
98 	read_lock(&kvm->mmu_lock);
99 
100 	/*
101 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
102 	 * allocating a new root (see kvm_mmu_load()), and when migrating a vCPU
103 	 * to a different pCPU.  Note, the local TLB flush on reuse also
104 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
105 	 * intermediate paging structures, that may be zapped, as such entries
106 	 * are associated with the ASID on both VMX and SVM.
107 	 */
108 	tdp_mmu_zap_root(kvm, root, true);
109 
110 	/*
111 	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
112 	 * avoiding an infinite loop.  By design, the root is reachable while
113 	 * it's being asynchronously zapped, thus a different task can put its
114 	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
115 	 * asynchronously zapped root is unavoidable.
116 	 */
117 	kvm_tdp_mmu_put_root(kvm, root, true);
118 
119 	read_unlock(&kvm->mmu_lock);
120 }
121 
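/* Queue asynchronous zapping of an invalidated root on the VM's zap workqueue. */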
122 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
123 {
124 	root->tdp_mmu_async_data = kvm;
125 	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
126 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
127 }
128 
129 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
130 			  bool shared)
131 {
132 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
133 
134 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
135 		return;
136 
137 	/*
138 	 * The TDP MMU itself holds a reference to each root until the root is
139 	 * explicitly invalidated, i.e. the final reference should never be
140 	 * put for a valid root.
141 	 */
142 	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
143 
144 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
145 	list_del_rcu(&root->link);
146 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
147 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
148 }
149 
150 /*
151  * Returns the next root after @prev_root (or the first root if @prev_root is
152  * NULL).  A reference to the returned root is acquired, and the reference to
153  * @prev_root is released (the caller obviously must hold a reference to
154  * @prev_root if it's non-NULL).
155  *
156  * If @only_valid is true, invalid roots are skipped.
157  *
158  * Returns NULL if the end of tdp_mmu_roots was reached.
159  */
160 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
161 					      struct kvm_mmu_page *prev_root,
162 					      bool shared, bool only_valid)
163 {
164 	struct kvm_mmu_page *next_root;
165 
166 	rcu_read_lock();
167 
168 	if (prev_root)
169 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
170 						  &prev_root->link,
171 						  typeof(*prev_root), link);
172 	else
173 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
174 						   typeof(*next_root), link);
175 
176 	while (next_root) {
177 		if ((!only_valid || !next_root->role.invalid) &&
178 		    kvm_tdp_mmu_get_root(next_root))
179 			break;
180 
181 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
182 				&next_root->link, typeof(*next_root), link);
183 	}
184 
185 	rcu_read_unlock();
186 
187 	if (prev_root)
188 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
189 
190 	return next_root;
191 }
192 
193 /*
194  * Note: this iterator gets and puts references to the roots it iterates over.
195  * This makes it safe to release the MMU lock and yield within the loop, but
196  * if exiting the loop early, the caller must drop the reference to the most
197  * recent root. (Unless keeping a live reference is desirable.)
198  *
199  * If shared is set, this function is operating under the MMU lock in read
200  * mode. In the unlikely event that this thread must free a root, the lock
201  * will be temporarily dropped and reacquired in write mode.
202  */
203 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
204 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
205 	     _root;								\
206 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
207 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
208 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
209 		} else
210 
211 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
212 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
213 
214 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
215 	for (_root = tdp_mmu_next_root(_kvm, NULL, false, false);		\
216 	     _root;								\
217 	     _root = tdp_mmu_next_root(_kvm, _root, false, false))		\
218 		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, false)) {		\
219 		} else
220 
221 /*
222  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
223  * the implication being that any flow that holds mmu_lock for read is
224  * inherently yield-friendly and should use the yield-safe variant above.
225  * Holding mmu_lock for write obviates the need for RCU protection as the list
226  * is guaranteed to be stable.
227  */
228 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
229 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
230 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
231 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
232 		} else
233 
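/* Allocate a shadow page (header plus page table page) from the vCPU's caches. */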
234 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
235 {
236 	struct kvm_mmu_page *sp;
237 
238 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
239 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
240 
241 	return sp;
242 }
243 
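/*
 * Initialize a freshly allocated shadow page: record its role, base GFN, and
 * parent SPTE pointer, and mark it as a TDP MMU page.
 */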
244 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
245 			    gfn_t gfn, union kvm_mmu_page_role role)
246 {
247 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
248 
249 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
250 
251 	sp->role = role;
252 	sp->gfn = gfn;
253 	sp->ptep = sptep;
254 	sp->tdp_mmu_page = true;
255 
256 	trace_kvm_mmu_get_page(sp, true);
257 }
258 
259 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
260 				  struct tdp_iter *iter)
261 {
262 	struct kvm_mmu_page *parent_sp;
263 	union kvm_mmu_page_role role;
264 
265 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
266 
267 	role = parent_sp->role;
268 	role.level--;
269 
270 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
271 }
272 
273 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
274 {
275 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
276 	struct kvm *kvm = vcpu->kvm;
277 	struct kvm_mmu_page *root;
278 
279 	lockdep_assert_held_write(&kvm->mmu_lock);
280 
281 	/*
282 	 * Check for an existing root before allocating a new one.  Note, the
283 	 * role check prevents consuming an invalid root.
284 	 */
285 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
286 		if (root->role.word == role.word &&
287 		    kvm_tdp_mmu_get_root(root))
288 			goto out;
289 	}
290 
291 	root = tdp_mmu_alloc_sp(vcpu);
292 	tdp_mmu_init_sp(root, NULL, 0, role);
293 
294 	/*
295 	 * TDP MMU roots are kept until they are explicitly invalidated, either
296 	 * by a memslot update or by the destruction of the VM.  Initialize the
297 	 * refcount to two; one reference for the vCPU, and one reference for
298 	 * the TDP MMU itself, which is held until the root is invalidated and
299 	 * is ultimately put by tdp_mmu_zap_root_work().
300 	 */
301 	refcount_set(&root->tdp_mmu_root_count, 2);
302 
303 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
304 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
305 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
306 
307 out:
308 	return __pa(root->spt);
309 }
310 
311 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
312 				u64 old_spte, u64 new_spte, int level,
313 				bool shared);
314 
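/*
 * (Un)account a linked/unlinked page table page in the kernel's pagetable
 * stats and in the VM's TDP MMU page count.
 */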
315 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
316 {
317 	kvm_account_pgtable_pages((void *)sp->spt, +1);
318 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
319 }
320 
321 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
322 {
323 	kvm_account_pgtable_pages((void *)sp->spt, -1);
324 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
325 }
326 
327 /**
328  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
329  *
330  * @kvm: kvm instance
331  * @sp: the page to be removed
332  * @shared: This operation may not be running under the exclusive use of
333  *	    the MMU lock and the operation must synchronize with other
334  *	    threads that might be adding or removing pages.
335  */
336 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
337 			      bool shared)
338 {
339 	tdp_unaccount_mmu_page(kvm, sp);
340 
341 	if (!sp->nx_huge_page_disallowed)
342 		return;
343 
344 	if (shared)
345 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
346 	else
347 		lockdep_assert_held_write(&kvm->mmu_lock);
348 
349 	sp->nx_huge_page_disallowed = false;
350 	untrack_possible_nx_huge_page(kvm, sp);
351 
352 	if (shared)
353 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
354 }
355 
356 /**
357  * handle_removed_pt() - handle a page table removed from the TDP structure
358  *
359  * @kvm: kvm instance
360  * @pt: the page removed from the paging structure
361  * @shared: This operation may not be running under the exclusive use
362  *	    of the MMU lock and the operation must synchronize with other
363  *	    threads that might be modifying SPTEs.
364  *
365  * Given a page table that has been removed from the TDP paging structure,
366  * iterates through the page table to clear SPTEs and free child page tables.
367  *
368  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
369  * protection. Since this thread removed it from the paging structure,
370  * this thread will be responsible for ensuring the page is freed. Hence the
371  * early rcu_dereferences in the function.
372  */
373 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
374 {
375 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
376 	int level = sp->role.level;
377 	gfn_t base_gfn = sp->gfn;
378 	int i;
379 
380 	trace_kvm_mmu_prepare_zap_page(sp);
381 
382 	tdp_mmu_unlink_sp(kvm, sp, shared);
383 
384 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
385 		tdp_ptep_t sptep = pt + i;
386 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
387 		u64 old_spte;
388 
389 		if (shared) {
390 			/*
391 			 * Set the SPTE to a nonpresent value that other
392 			 * threads will not overwrite. If the SPTE was
393 			 * already marked as removed then another thread
394 			 * handling a page fault could overwrite it, so
395 			 * keep setting the SPTE until it transitions from
396 			 * some other value to the removed SPTE value.
397 			 */
398 			for (;;) {
399 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
400 				if (!is_removed_spte(old_spte))
401 					break;
402 				cpu_relax();
403 			}
404 		} else {
405 			/*
406 			 * If the SPTE is not MMU-present, there is no backing
407 			 * page associated with the SPTE and so no side effects
408 			 * that need to be recorded, and exclusive ownership of
409 			 * mmu_lock ensures the SPTE can't be made present.
410 			 * Note, zapping MMIO SPTEs is also unnecessary as they
411 			 * are guarded by the memslots generation, not by being
412 			 * unreachable.
413 			 */
414 			old_spte = kvm_tdp_mmu_read_spte(sptep);
415 			if (!is_shadow_present_pte(old_spte))
416 				continue;
417 
418 			/*
419 			 * Use the common helper instead of a raw WRITE_ONCE as
420 			 * the SPTE needs to be updated atomically if it can be
421 			 * modified by a different vCPU outside of mmu_lock.
422 			 * Even though the parent SPTE is !PRESENT, the TLB
423 			 * hasn't yet been flushed, and both Intel and AMD
424 			 * document that A/D assists can use upper-level PxE
425 			 * entries that are cached in the TLB, i.e. the CPU can
426 			 * still access the page and mark it dirty.
427 			 *
428 			 * No retry is needed in the atomic update path as the
429 			 * sole concern is dropping a Dirty bit, i.e. no other
430 			 * task can zap/remove the SPTE as mmu_lock is held for
431 			 * write.  Marking the SPTE as a removed SPTE is not
432 			 * strictly necessary for the same reason, but using
433 			 * the removed SPTE value keeps the shared/exclusive
434 			 * paths consistent and allows the handle_changed_spte()
435 			 * call below to hardcode the new value to REMOVED_SPTE.
436 			 *
437 			 * Note, even though dropping a Dirty bit is the only
438 			 * scenario where a non-atomic update could result in a
439 			 * functional bug, simply checking the Dirty bit isn't
440 			 * sufficient as a fast page fault could read the upper
441 			 * level SPTE before it is zapped, and then make this
442 			 * target SPTE writable, resume the guest, and set the
443 			 * Dirty bit between reading the SPTE above and writing
444 			 * it here.
445 			 */
446 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
447 							  REMOVED_SPTE, level);
448 		}
449 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
450 				    old_spte, REMOVED_SPTE, level, shared);
451 	}
452 
453 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
454 }
455 
456 /**
457  * handle_changed_spte - handle bookkeeping associated with an SPTE change
458  * @kvm: kvm instance
459  * @as_id: the address space of the paging structure the SPTE was a part of
460  * @gfn: the base GFN that was mapped by the SPTE
461  * @old_spte: The value of the SPTE before the change
462  * @new_spte: The value of the SPTE after the change
463  * @level: the level of the PT the SPTE is part of in the paging structure
464  * @shared: This operation may not be running under the exclusive use of
465  *	    the MMU lock and the operation must synchronize with other
466  *	    threads that might be modifying SPTEs.
467  *
468  * Handle bookkeeping that might result from the modification of a SPTE.  Note,
469  * dirty logging updates are handled in common code, not here (see make_spte()
470  * and fast_pf_fix_direct_spte()).
471  */
472 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
473 				u64 old_spte, u64 new_spte, int level,
474 				bool shared)
475 {
476 	bool was_present = is_shadow_present_pte(old_spte);
477 	bool is_present = is_shadow_present_pte(new_spte);
478 	bool was_leaf = was_present && is_last_spte(old_spte, level);
479 	bool is_leaf = is_present && is_last_spte(new_spte, level);
480 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
481 
482 	WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
483 	WARN_ON_ONCE(level < PG_LEVEL_4K);
484 	WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
485 
486 	/*
487 	 * If this warning were to trigger it would indicate that there was a
488 	 * missing MMU notifier or a race with some notifier handler.
489 	 * A present, leaf SPTE should never be directly replaced with another
490 	 * present leaf SPTE pointing to a different PFN. A notifier handler
491 	 * should be zapping the SPTE before the main MM's page table is
492 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
493 	 * thread before replacement.
494 	 */
495 	if (was_leaf && is_leaf && pfn_changed) {
496 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
497 		       "SPTE with another present leaf SPTE mapping a\n"
498 		       "different PFN!\n"
499 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
500 		       as_id, gfn, old_spte, new_spte, level);
501 
502 		/*
503 		 * Crash the host to prevent error propagation and guest data
504 		 * corruption.
505 		 */
506 		BUG();
507 	}
508 
509 	if (old_spte == new_spte)
510 		return;
511 
512 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
513 
514 	if (is_leaf)
515 		check_spte_writable_invariants(new_spte);
516 
517 	/*
518 	 * The only time a SPTE should be changed from a non-present to
519 	 * non-present state is when an MMIO entry is installed/modified/
520 	 * removed. In that case, there is nothing to do here.
521 	 */
522 	if (!was_present && !is_present) {
523 		/*
524 		 * If this change does not involve a MMIO SPTE or removed SPTE,
525 		 * it is unexpected. Log the change, though it should not
526 		 * impact the guest since both the former and current SPTEs
527 		 * are nonpresent.
528 		 */
529 		if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
530 				 !is_mmio_spte(new_spte) &&
531 				 !is_removed_spte(new_spte)))
532 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
533 			       "should not be replaced with another,\n"
534 			       "different nonpresent SPTE, unless one or both\n"
535 			       "are MMIO SPTEs, or the new SPTE is\n"
536 			       "a temporary removed SPTE.\n"
537 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
538 			       as_id, gfn, old_spte, new_spte, level);
539 		return;
540 	}
541 
542 	if (is_leaf != was_leaf)
543 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
544 
545 	if (was_leaf && is_dirty_spte(old_spte) &&
546 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
547 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
548 
549 	/*
550 	 * Recursively handle child PTs if the change removed a subtree from
551 	 * the paging structure.  Note the WARN on the PFN changing without the
552 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
553 	 * pages are kernel allocations and should never be migrated.
554 	 */
555 	if (was_present && !was_leaf &&
556 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
557 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
558 
559 	if (was_leaf && is_accessed_spte(old_spte) &&
560 	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
561 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
562 }
563 
564 /*
565  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
566  * and handle the associated bookkeeping.  Do not mark the page dirty
567  * in KVM's dirty bitmaps.
568  *
569  * If setting the SPTE fails because it has changed, iter->old_spte will be
570  * refreshed to the current value of the spte.
571  *
572  * @kvm: kvm instance
573  * @iter: a tdp_iter instance currently on the SPTE that should be set
574  * @new_spte: The value the SPTE should be set to
575  * Return:
576  * * 0      - If the SPTE was set.
577  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
578  *            no side-effects other than setting iter->old_spte to the last
579  *            known value of the spte.
580  */
581 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
582 					  struct tdp_iter *iter,
583 					  u64 new_spte)
584 {
585 	u64 *sptep = rcu_dereference(iter->sptep);
586 
587 	/*
588 	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
589 	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
590 	 * and pre-checking before inserting a new SPTE is advantageous as it
591 	 * avoids unnecessary work.
592 	 */
593 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
594 
595 	lockdep_assert_held_read(&kvm->mmu_lock);
596 
597 	/*
598 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
599 	 * does not hold the mmu_lock.  On failure, i.e. if a different logical
600 	 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
601 	 * the current value, so the caller operates on fresh data, e.g. if it
602 	 * retries tdp_mmu_set_spte_atomic()
603 	 */
604 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
605 		return -EBUSY;
606 
607 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
608 			    new_spte, iter->level, true);
609 
610 	return 0;
611 }
612 
613 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
614 					  struct tdp_iter *iter)
615 {
616 	int ret;
617 
618 	/*
619 	 * Freeze the SPTE by setting it to a special,
620 	 * non-present value. This will stop other threads from
621 	 * immediately installing a present entry in its place
622 	 * before the TLBs are flushed.
623 	 */
624 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
625 	if (ret)
626 		return ret;
627 
628 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
629 
630 	/*
631 	 * No other thread can overwrite the removed SPTE as they must either
632 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
633 	 * overwrite the special removed SPTE value. No bookkeeping is needed
634 	 * here since the SPTE is going from non-present to non-present.  Use
635 	 * the raw write helper to avoid an unnecessary check on volatile bits.
636 	 */
637 	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
638 
639 	return 0;
640 }
641 
642 
643 /*
644  * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
645  * @kvm:	      KVM instance
646  * @as_id:	      Address space ID, i.e. regular vs. SMM
647  * @sptep:	      Pointer to the SPTE
648  * @old_spte:	      The current value of the SPTE
649  * @new_spte:	      The new value that will be set for the SPTE
650  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
651  * @level:	      The level _containing_ the SPTE (its parent PT's level)
652  *
653  * Returns the old SPTE value, which _may_ be different than @old_spte if the
654  * SPTE had volatile bits.
655  */
656 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
657 			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
658 {
659 	lockdep_assert_held_write(&kvm->mmu_lock);
660 
661 	/*
662 	 * No thread should be using this function to set SPTEs to or from the
663 	 * temporary removed SPTE value.
664 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
665 	 * should be used. If operating under the MMU lock in write mode, the
666 	 * use of the removed SPTE should not be necessary.
667 	 */
668 	WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
669 
670 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
671 
672 	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
673 	return old_spte;
674 }
675 
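/*
 * Wrapper around tdp_mmu_set_spte() that operates on a tdp_iter and refreshes
 * iter->old_spte with the value that was actually present in the SPTE.
 */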
676 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
677 					 u64 new_spte)
678 {
679 	WARN_ON_ONCE(iter->yielded);
680 	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
681 					  iter->old_spte, new_spte,
682 					  iter->gfn, iter->level);
683 }
684 
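/*
 * Iterator helpers: tdp_root_for_each_pte() visits every SPTE of @_root in the
 * GFN range [_start, _end), tdp_root_for_each_leaf_pte() additionally skips
 * SPTEs that aren't present, last-level entries, and tdp_mmu_for_each_pte()
 * walks the given MMU's current root.  See kvm_tdp_mmu_handle_gfn() below for
 * a typical leaf-only walk.
 */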
685 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
686 	for_each_tdp_pte(_iter, _root, _start, _end)
687 
688 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
689 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
690 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
691 		    !is_last_spte(_iter.old_spte, _iter.level))		\
692 			continue;					\
693 		else
694 
695 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
696 	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
697 
698 /*
699  * Yield if the MMU lock is contended or this thread needs to return control
700  * to the scheduler.
701  *
702  * If this function should yield and flush is set, it will perform a remote
703  * TLB flush before yielding.
704  *
705  * If this function yields, iter->yielded is set and the caller must skip to
706  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
707  * over the paging structures to allow the iterator to continue its traversal
708  * from the paging structure root.
709  *
710  * Returns true if this function yielded.
711  */
712 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
713 							  struct tdp_iter *iter,
714 							  bool flush, bool shared)
715 {
716 	WARN_ON_ONCE(iter->yielded);
717 
718 	/* Ensure forward progress has been made before yielding. */
719 	if (iter->next_last_level_gfn == iter->yielded_gfn)
720 		return false;
721 
722 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
723 		if (flush)
724 			kvm_flush_remote_tlbs(kvm);
725 
726 		rcu_read_unlock();
727 
728 		if (shared)
729 			cond_resched_rwlock_read(&kvm->mmu_lock);
730 		else
731 			cond_resched_rwlock_write(&kvm->mmu_lock);
732 
733 		rcu_read_lock();
734 
735 		WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
736 
737 		iter->yielded = true;
738 	}
739 
740 	return iter->yielded;
741 }
742 
743 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
744 {
745 	/*
746 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
747 	 * a gpa range that would exceed the max gfn, and KVM does not create
748 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
749 	 * the slow emulation path every time.
750 	 */
751 	return kvm_mmu_max_gfn() + 1;
752 }
753 
754 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
755 			       bool shared, int zap_level)
756 {
757 	struct tdp_iter iter;
758 
759 	gfn_t end = tdp_mmu_max_gfn_exclusive();
760 	gfn_t start = 0;
761 
762 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
763 retry:
764 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
765 			continue;
766 
767 		if (!is_shadow_present_pte(iter.old_spte))
768 			continue;
769 
770 		if (iter.level > zap_level)
771 			continue;
772 
773 		if (!shared)
774 			tdp_mmu_iter_set_spte(kvm, &iter, 0);
775 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
776 			goto retry;
777 	}
778 }
779 
780 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
781 			     bool shared)
782 {
783 
784 	/*
785 	 * The root must have an elevated refcount so that it's reachable via
786 	 * mmu_notifier callbacks, which allows this path to yield and drop
787 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
788 	 * must drop all references to relevant pages prior to completing the
789 	 * callback.  Dropping mmu_lock with an unreachable root would result
790 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
791 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
792 	 * dirty accessed bits to the SPTE's associated struct page.
793 	 */
794 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
795 
796 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
797 
798 	rcu_read_lock();
799 
800 	/*
801 	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
802 	 * split the zap into two passes.  On the first pass, zap at the 1gb
803 	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
804 	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
805 	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
806 	 *
807 	 * Because zapping a SP recurses on its children, stepping down to
808 	 * PG_LEVEL_4K in the iterator itself is unnecessary.
809 	 */
810 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
811 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
812 
813 	rcu_read_unlock();
814 }
815 
816 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
817 {
818 	u64 old_spte;
819 
820 	/*
821 	 * This helper intentionally doesn't allow zapping a root shadow page,
822 	 * which doesn't have a parent page table and thus no associated entry.
823 	 */
824 	if (WARN_ON_ONCE(!sp->ptep))
825 		return false;
826 
827 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
828 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
829 		return false;
830 
831 	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
832 			 sp->gfn, sp->role.level + 1);
833 
834 	return true;
835 }
836 
837 /*
838  * If can_yield is true, will release the MMU lock and reschedule if the
839  * scheduler needs the CPU or there is contention on the MMU lock. If this
840  * function cannot yield, it will not release the MMU lock or reschedule and
841  * the caller must ensure it does not supply too large a GFN range, or the
842  * operation can cause a soft lockup.
843  */
844 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
845 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
846 {
847 	struct tdp_iter iter;
848 
849 	end = min(end, tdp_mmu_max_gfn_exclusive());
850 
851 	lockdep_assert_held_write(&kvm->mmu_lock);
852 
853 	rcu_read_lock();
854 
855 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
856 		if (can_yield &&
857 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
858 			flush = false;
859 			continue;
860 		}
861 
862 		if (!is_shadow_present_pte(iter.old_spte) ||
863 		    !is_last_spte(iter.old_spte, iter.level))
864 			continue;
865 
866 		tdp_mmu_iter_set_spte(kvm, &iter, 0);
867 		flush = true;
868 	}
869 
870 	rcu_read_unlock();
871 
872 	/*
873 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
874 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
875 	 */
876 	return flush;
877 }
878 
879 /*
880  * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
881  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
882  * more SPTEs were zapped since the MMU lock was last acquired.
883  */
884 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
885 {
886 	struct kvm_mmu_page *root;
887 
888 	for_each_tdp_mmu_root_yield_safe(kvm, root)
889 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
890 
891 	return flush;
892 }
893 
894 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
895 {
896 	struct kvm_mmu_page *root;
897 
898 	/*
899 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
900 	 * before returning to the caller.  Zap directly even if the root is
901 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
902 	 * all that expensive and mmu_lock is already held, which means the
903 	 * worker has yielded, i.e. flushing the work instead of zapping here
904 	 * isn't guaranteed to be any faster.
905 	 *
906 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
907 	 * is being destroyed or the userspace VMM has exited.  In both cases,
908 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
909 	 */
910 	for_each_tdp_mmu_root_yield_safe(kvm, root)
911 		tdp_mmu_zap_root(kvm, root, false);
912 }
913 
914 /*
915  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
916  * zap" completes.
917  */
918 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
919 {
920 	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
921 }
922 
923 /*
924  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
925  * is about to be zapped, e.g. in response to a memslots update.  The actual
926  * zapping is performed asynchronously.  Using a separate workqueue makes it
927  * easy to ensure that the destruction is performed before the "fast zap"
928  * completes, without keeping a separate list of invalidated roots; the list is
929  * effectively the list of work items in the workqueue.
930  *
931  * Note, the asynchronous worker is gifted the TDP MMU's reference.
932  * See kvm_tdp_mmu_get_vcpu_root_hpa().
933  */
934 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
935 {
936 	struct kvm_mmu_page *root;
937 
938 	/*
939 	 * mmu_lock must be held for write to ensure that a root doesn't become
940 	 * invalid while there are active readers (invalidating a root while
941 	 * there are active readers may or may not be problematic in practice,
942 	 * but it's uncharted territory and not supported).
943 	 *
944 	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
945 	 * being destroyed after all references have been put, or if no vCPUs
946 	 * have been created (which means there are no roots), i.e. the VM is
947 	 * being destroyed in an error path of KVM_CREATE_VM.
948 	 */
949 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
950 	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
951 		lockdep_assert_held_write(&kvm->mmu_lock);
952 
953 	/*
954 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
955 	 * be other references to @kvm, i.e. nothing else can invalidate roots
956 	 * or be consuming roots, but walking the list of roots does need to be
957 	 * guarded against roots being deleted by the asynchronous zap worker.
958 	 */
959 	rcu_read_lock();
960 
961 	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
962 		if (!root->role.invalid) {
963 			root->role.invalid = true;
964 			tdp_mmu_schedule_zap_root(kvm, root);
965 		}
966 	}
967 
968 	rcu_read_unlock();
969 }
970 
971 /*
972  * Installs a last-level SPTE to handle a TDP page fault.
973  * (NPT/EPT violation/misconfiguration)
974  */
975 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
976 					  struct kvm_page_fault *fault,
977 					  struct tdp_iter *iter)
978 {
979 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
980 	u64 new_spte;
981 	int ret = RET_PF_FIXED;
982 	bool wrprot = false;
983 
984 	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
985 		return RET_PF_RETRY;
986 
987 	if (unlikely(!fault->slot))
988 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
989 	else
990 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
991 					 fault->pfn, iter->old_spte, fault->prefetch, true,
992 					 fault->map_writable, &new_spte);
993 
994 	if (new_spte == iter->old_spte)
995 		ret = RET_PF_SPURIOUS;
996 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
997 		return RET_PF_RETRY;
998 	else if (is_shadow_present_pte(iter->old_spte) &&
999 		 !is_last_spte(iter->old_spte, iter->level))
1000 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1001 
1002 	/*
1003 	 * If the page fault was caused by a write but the page is write
1004 	 * protected, emulation is needed. If emulation were skipped,
1005 	 * the vCPU would hit the same fault again.
1006 	 */
1007 	if (wrprot) {
1008 		if (fault->write)
1009 			ret = RET_PF_EMULATE;
1010 	}
1011 
1012 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1013 	if (unlikely(is_mmio_spte(new_spte))) {
1014 		vcpu->stat.pf_mmio_spte_created++;
1015 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1016 				     new_spte);
1017 		ret = RET_PF_EMULATE;
1018 	} else {
1019 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1020 				       rcu_dereference(iter->sptep));
1021 	}
1022 
1023 	return ret;
1024 }
1025 
1026 /*
1027  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1028  * provided page table.
1029  *
1030  * @kvm: kvm instance
1031  * @iter: a tdp_iter instance currently on the SPTE that should be set
1032  * @sp: The new TDP page table to install.
1033  * @shared: This operation is running under the MMU lock in read mode.
1034  *
1035  * Returns: 0 if the new page table was installed. Non-0 if the page table
1036  *          could not be installed (e.g. the atomic compare-exchange failed).
1037  */
1038 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1039 			   struct kvm_mmu_page *sp, bool shared)
1040 {
1041 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1042 	int ret = 0;
1043 
1044 	if (shared) {
1045 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1046 		if (ret)
1047 			return ret;
1048 	} else {
1049 		tdp_mmu_iter_set_spte(kvm, iter, spte);
1050 	}
1051 
1052 	tdp_account_mmu_page(kvm, sp);
1053 
1054 	return 0;
1055 }
1056 
1057 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1058 				   struct kvm_mmu_page *sp, bool shared);
1059 
1060 /*
1061  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1062  * page tables and SPTEs to translate the faulting guest physical address.
1063  */
1064 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1065 {
1066 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1067 	struct kvm *kvm = vcpu->kvm;
1068 	struct tdp_iter iter;
1069 	struct kvm_mmu_page *sp;
1070 	int ret = RET_PF_RETRY;
1071 
1072 	kvm_mmu_hugepage_adjust(vcpu, fault);
1073 
1074 	trace_kvm_mmu_spte_requested(fault);
1075 
1076 	rcu_read_lock();
1077 
1078 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1079 		int r;
1080 
1081 		if (fault->nx_huge_page_workaround_enabled)
1082 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1083 
1084 		/*
1085 		 * If SPTE has been frozen by another thread, just give up and
1086 		 * retry, avoiding unnecessary page table allocation and free.
1087 		 */
1088 		if (is_removed_spte(iter.old_spte))
1089 			goto retry;
1090 
1091 		if (iter.level == fault->goal_level)
1092 			goto map_target_level;
1093 
1094 		/* Step down into the lower level page table if it exists. */
1095 		if (is_shadow_present_pte(iter.old_spte) &&
1096 		    !is_large_pte(iter.old_spte))
1097 			continue;
1098 
1099 		/*
1100 		 * The SPTE is either non-present or points to a huge page that
1101 		 * needs to be split.
1102 		 */
1103 		sp = tdp_mmu_alloc_sp(vcpu);
1104 		tdp_mmu_init_child_sp(sp, &iter);
1105 
1106 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1107 
1108 		if (is_shadow_present_pte(iter.old_spte))
1109 			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1110 		else
1111 			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1112 
1113 		/*
1114 		 * Force the guest to retry if installing an upper level SPTE
1115 		 * failed, e.g. because a different task modified the SPTE.
1116 		 */
1117 		if (r) {
1118 			tdp_mmu_free_sp(sp);
1119 			goto retry;
1120 		}
1121 
1122 		if (fault->huge_page_disallowed &&
1123 		    fault->req_level >= iter.level) {
1124 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1125 			if (sp->nx_huge_page_disallowed)
1126 				track_possible_nx_huge_page(kvm, sp);
1127 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1128 		}
1129 	}
1130 
1131 	/*
1132 	 * The walk aborted before reaching the target level, e.g. because the
1133 	 * iterator detected an upper level SPTE was frozen during traversal.
1134 	 */
1135 	WARN_ON_ONCE(iter.level == fault->goal_level);
1136 	goto retry;
1137 
1138 map_target_level:
1139 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1140 
1141 retry:
1142 	rcu_read_unlock();
1143 	return ret;
1144 }
1145 
1146 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1147 				 bool flush)
1148 {
1149 	struct kvm_mmu_page *root;
1150 
1151 	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
1152 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1153 					  range->may_block, flush);
1154 
1155 	return flush;
1156 }
1157 
1158 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1159 			      struct kvm_gfn_range *range);
1160 
1161 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1162 						   struct kvm_gfn_range *range,
1163 						   tdp_handler_t handler)
1164 {
1165 	struct kvm_mmu_page *root;
1166 	struct tdp_iter iter;
1167 	bool ret = false;
1168 
1169 	/*
1170 	 * Don't support rescheduling, as none of the MMU notifiers that funnel
1171 	 * into this helper allow blocking; it'd be dead, wasteful code.
1172 	 */
1173 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1174 		rcu_read_lock();
1175 
1176 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1177 			ret |= handler(kvm, &iter, range);
1178 
1179 		rcu_read_unlock();
1180 	}
1181 
1182 	return ret;
1183 }
1184 
1185 /*
1186  * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
1187  * if any of the GFNs in the range have been accessed.
1188  *
1189  * No need to mark the corresponding PFN as accessed as this call is coming
1190  * from the clear_young() or clear_flush_young() notifier, which uses the
1191  * return value to determine if the page has been accessed.
1192  */
1193 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1194 			  struct kvm_gfn_range *range)
1195 {
1196 	u64 new_spte;
1197 
1198 	/* If we have a non-accessed entry we don't need to change the pte. */
1199 	if (!is_accessed_spte(iter->old_spte))
1200 		return false;
1201 
1202 	if (spte_ad_enabled(iter->old_spte)) {
1203 		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1204 							 iter->old_spte,
1205 							 shadow_accessed_mask,
1206 							 iter->level);
1207 		new_spte = iter->old_spte & ~shadow_accessed_mask;
1208 	} else {
1209 		/*
1210 		 * Capture the dirty status of the page, so that it doesn't get
1211 		 * lost when the SPTE is marked for access tracking.
1212 		 */
1213 		if (is_writable_pte(iter->old_spte))
1214 			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1215 
1216 		new_spte = mark_spte_for_access_track(iter->old_spte);
1217 		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1218 							iter->old_spte, new_spte,
1219 							iter->level);
1220 	}
1221 
1222 	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1223 				       iter->old_spte, new_spte);
1224 	return true;
1225 }
1226 
1227 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1228 {
1229 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1230 }
1231 
1232 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1233 			 struct kvm_gfn_range *range)
1234 {
1235 	return is_accessed_spte(iter->old_spte);
1236 }
1237 
1238 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1239 {
1240 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1241 }
1242 
1243 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1244 			 struct kvm_gfn_range *range)
1245 {
1246 	u64 new_spte;
1247 
1248 	/* Huge pages aren't expected to be modified without first being zapped. */
1249 	WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
1250 
1251 	if (iter->level != PG_LEVEL_4K ||
1252 	    !is_shadow_present_pte(iter->old_spte))
1253 		return false;
1254 
1255 	/*
1256 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
1257 	 * zero the SPTE before setting the new PFN, but doing so preserves the
1258 	 * invariant that the PFN of a present leaf SPTE can never change.
1259 	 * See handle_changed_spte().
1260 	 */
1261 	tdp_mmu_iter_set_spte(kvm, iter, 0);
1262 
1263 	if (!pte_write(range->arg.pte)) {
1264 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1265 								  pte_pfn(range->arg.pte));
1266 
1267 		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1268 	}
1269 
1270 	return true;
1271 }
1272 
1273 /*
1274  * Handle the changed_pte MMU notifier for the TDP MMU.
1275  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
1276  * notifier.
1277  * Returns non-zero if a flush is needed before releasing the MMU lock.
1278  */
1279 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1280 {
1281 	/*
1282 	 * No need to handle the remote TLB flush under RCU protection, the
1283 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1284 	 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
1285 	 */
1286 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1287 }
1288 
1289 /*
1290  * Remove write access from all SPTEs at or above min_level that map GFNs
1291  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1292  * be flushed.
1293  */
1294 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1295 			     gfn_t start, gfn_t end, int min_level)
1296 {
1297 	struct tdp_iter iter;
1298 	u64 new_spte;
1299 	bool spte_set = false;
1300 
1301 	rcu_read_lock();
1302 
1303 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1304 
1305 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1306 retry:
1307 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1308 			continue;
1309 
1310 		if (!is_shadow_present_pte(iter.old_spte) ||
1311 		    !is_last_spte(iter.old_spte, iter.level) ||
1312 		    !(iter.old_spte & PT_WRITABLE_MASK))
1313 			continue;
1314 
1315 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1316 
1317 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1318 			goto retry;
1319 
1320 		spte_set = true;
1321 	}
1322 
1323 	rcu_read_unlock();
1324 	return spte_set;
1325 }
1326 
1327 /*
1328  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1329  * only affect leaf SPTEs down to min_level.
1330  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1331  */
1332 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1333 			     const struct kvm_memory_slot *slot, int min_level)
1334 {
1335 	struct kvm_mmu_page *root;
1336 	bool spte_set = false;
1337 
1338 	lockdep_assert_held_read(&kvm->mmu_lock);
1339 
1340 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1341 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1342 			     slot->base_gfn + slot->npages, min_level);
1343 
1344 	return spte_set;
1345 }
1346 
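/*
 * Allocate a shadow page for huge page splitting straight from the allocators;
 * eager splitting has no vCPU (and thus no per-vCPU caches), so the caller
 * supplies the gfp flags.
 */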
1347 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1348 {
1349 	struct kvm_mmu_page *sp;
1350 
1351 	gfp |= __GFP_ZERO;
1352 
1353 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1354 	if (!sp)
1355 		return NULL;
1356 
1357 	sp->spt = (void *)__get_free_page(gfp);
1358 	if (!sp->spt) {
1359 		kmem_cache_free(mmu_page_header_cache, sp);
1360 		return NULL;
1361 	}
1362 
1363 	return sp;
1364 }
1365 
1366 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1367 						       struct tdp_iter *iter,
1368 						       bool shared)
1369 {
1370 	struct kvm_mmu_page *sp;
1371 
1372 	/*
1373 	 * Since we are allocating while under the MMU lock we have to be
1374 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1375 	 * reclaim and to avoid making any filesystem callbacks (which can end
1376 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1377 	 *
1378 	 * If this allocation fails we drop the lock and retry with reclaim
1379 	 * allowed.
1380 	 */
1381 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1382 	if (sp)
1383 		return sp;
1384 
1385 	rcu_read_unlock();
1386 
1387 	if (shared)
1388 		read_unlock(&kvm->mmu_lock);
1389 	else
1390 		write_unlock(&kvm->mmu_lock);
1391 
1392 	iter->yielded = true;
1393 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1394 
1395 	if (shared)
1396 		read_lock(&kvm->mmu_lock);
1397 	else
1398 		write_lock(&kvm->mmu_lock);
1399 
1400 	rcu_read_lock();
1401 
1402 	return sp;
1403 }
1404 
1405 /* Note, the caller is responsible for initializing @sp. */
1406 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1407 				   struct kvm_mmu_page *sp, bool shared)
1408 {
1409 	const u64 huge_spte = iter->old_spte;
1410 	const int level = iter->level;
1411 	int ret, i;
1412 
1413 	/*
1414 	 * No need for atomics when writing to sp->spt since the page table has
1415 	 * not been linked in yet and thus is not reachable from any other CPU.
1416 	 */
1417 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1418 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1419 
1420 	/*
1421 	 * Replace the huge spte with a pointer to the populated lower level
1422 	 * page table. Since we are making this change without a TLB flush, vCPUs
1423 	 * will see a mix of the split mappings and the original huge mapping,
1424 	 * depending on what's currently in their TLB. This is fine from a
1425 	 * correctness standpoint since the translation will be the same either
1426 	 * way.
1427 	 */
1428 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1429 	if (ret)
1430 		goto out;
1431 
1432 	/*
1433 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1434 	 * are overwriting from the page stats. But we have to manually update
1435 	 * the page stats with the new present child pages.
1436 	 */
1437 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1438 
1439 out:
1440 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1441 	return ret;
1442 }
1443 
1444 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1445 					 struct kvm_mmu_page *root,
1446 					 gfn_t start, gfn_t end,
1447 					 int target_level, bool shared)
1448 {
1449 	struct kvm_mmu_page *sp = NULL;
1450 	struct tdp_iter iter;
1451 	int ret = 0;
1452 
1453 	rcu_read_lock();
1454 
1455 	/*
1456 	 * Traverse the page table splitting all huge pages above the target
1457 	 * level into one lower level. For example, if we encounter a 1GB page
1458 	 * we split it into 512 2MB pages.
1459 	 *
1460 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1461 	 * to visit an SPTE before ever visiting its children, which means we
1462 	 * will correctly recursively split huge pages that are more than one
1463 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1464 	 * and then splitting each of those to 512 4KB pages).
1465 	 */
1466 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1467 retry:
1468 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1469 			continue;
1470 
1471 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1472 			continue;
1473 
1474 		if (!sp) {
1475 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1476 			if (!sp) {
1477 				ret = -ENOMEM;
1478 				trace_kvm_mmu_split_huge_page(iter.gfn,
1479 							      iter.old_spte,
1480 							      iter.level, ret);
1481 				break;
1482 			}
1483 
1484 			if (iter.yielded)
1485 				continue;
1486 		}
1487 
1488 		tdp_mmu_init_child_sp(sp, &iter);
1489 
1490 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1491 			goto retry;
1492 
1493 		sp = NULL;
1494 	}
1495 
1496 	rcu_read_unlock();
1497 
1498 	/*
1499 	 * It's possible to exit the loop having never used the last sp if, for
1500 	 * example, a vCPU doing HugePage NX splitting wins the race and
1501 	 * installs its own sp in place of the last sp we tried to split.
1502 	 */
1503 	if (sp)
1504 		tdp_mmu_free_sp(sp);
1505 
1506 	return ret;
1507 }
1508 
1509 
1510 /*
1511  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1512  */
1513 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1514 				      const struct kvm_memory_slot *slot,
1515 				      gfn_t start, gfn_t end,
1516 				      int target_level, bool shared)
1517 {
1518 	struct kvm_mmu_page *root;
1519 	int r = 0;
1520 
1521 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1522 
1523 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1524 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1525 		if (r) {
1526 			kvm_tdp_mmu_put_root(kvm, root, shared);
1527 			break;
1528 		}
1529 	}
1530 }
1531 
1532 /*
1533  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1534  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1535  * If AD bits are not enabled, this will require clearing the writable bit on
1536  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1537  * be flushed.
1538  */
1539 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1540 			   gfn_t start, gfn_t end)
1541 {
1542 	u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
1543 	struct tdp_iter iter;
1544 	bool spte_set = false;
1545 
1546 	rcu_read_lock();
1547 
1548 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
1549 retry:
1550 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1551 			continue;
1552 
1553 		if (!is_shadow_present_pte(iter.old_spte))
1554 			continue;
1555 
1556 		KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1557 				spte_ad_need_write_protect(iter.old_spte));
1558 
1559 		if (!(iter.old_spte & dbit))
1560 			continue;
1561 
1562 		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1563 			goto retry;
1564 
1565 		spte_set = true;
1566 	}
1567 
1568 	rcu_read_unlock();
1569 	return spte_set;
1570 }
1571 
1572 /*
1573  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1574  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1575  * If AD bits are not enabled, this will require clearing the writable bit on
1576  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1577  * be flushed.
1578  */
1579 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1580 				  const struct kvm_memory_slot *slot)
1581 {
1582 	struct kvm_mmu_page *root;
1583 	bool spte_set = false;
1584 
1585 	lockdep_assert_held_read(&kvm->mmu_lock);
1586 
1587 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1588 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1589 				slot->base_gfn + slot->npages);
1590 
1591 	return spte_set;
1592 }
1593 
1594 /*
1595  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1596  * set in mask, starting at gfn. The given memslot is expected to contain all
1597  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1598  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1599  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1600  */
1601 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1602 				  gfn_t gfn, unsigned long mask, bool wrprot)
1603 {
1604 	u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
1605 						   shadow_dirty_mask;
1606 	struct tdp_iter iter;
1607 
1608 	lockdep_assert_held_write(&kvm->mmu_lock);
1609 
1610 	rcu_read_lock();
1611 
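	/*
	 * Only walk the GFNs covered by the mask: __ffs(mask) is the lowest
	 * set bit, the mask spans at most BITS_PER_LONG GFNs starting at gfn,
	 * and the walk terminates early once all set bits have been handled.
	 */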
1612 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1613 				    gfn + BITS_PER_LONG) {
1614 		if (!mask)
1615 			break;
1616 
1617 		KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1618 				spte_ad_need_write_protect(iter.old_spte));
1619 
1620 		if (iter.level > PG_LEVEL_4K ||
1621 		    !(mask & (1UL << (iter.gfn - gfn))))
1622 			continue;
1623 
1624 		mask &= ~(1UL << (iter.gfn - gfn));
1625 
1626 		if (!(iter.old_spte & dbit))
1627 			continue;
1628 
1629 		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1630 							iter.old_spte, dbit,
1631 							iter.level);
1632 
1633 		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1634 					       iter.old_spte,
1635 					       iter.old_spte & ~dbit);
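		/*
		 * The dirty (or writable) bit was set, so the backing pfn may
		 * have been written; mark the pfn dirty so that information
		 * isn't lost now that the bit has been cleared.
		 */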
1636 		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1637 	}
1638 
1639 	rcu_read_unlock();
1640 }
1641 
1642 /*
1643  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1644  * set in mask, starting at gfn. The given memslot is expected to contain all
1645  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1646  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1647  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1648  */
1649 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1650 				       struct kvm_memory_slot *slot,
1651 				       gfn_t gfn, unsigned long mask,
1652 				       bool wrprot)
1653 {
1654 	struct kvm_mmu_page *root;
1655 
1656 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1657 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1658 }
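
/*
 * Note: the wrapper above clears dirty state for one BITS_PER_LONG-sized
 * chunk of a memslot's dirty bitmap; common MMU code is expected to pass the
 * chunk's base GFN and bitmap word as @gfn and @mask, with @wrprot selecting
 * write protection instead of dirty-bit clearing.
 */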
1659 
1660 static void zap_collapsible_spte_range(struct kvm *kvm,
1661 				       struct kvm_mmu_page *root,
1662 				       const struct kvm_memory_slot *slot)
1663 {
1664 	gfn_t start = slot->base_gfn;
1665 	gfn_t end = start + slot->npages;
1666 	struct tdp_iter iter;
1667 	int max_mapping_level;
1668 
1669 	rcu_read_lock();
1670 
1671 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1672 retry:
1673 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1674 			continue;
1675 
1676 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1677 		    !is_shadow_present_pte(iter.old_spte))
1678 			continue;
1679 
1680 		/*
1681 		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1682 		 * a large page, then its parent would have been zapped instead
1683 		 * of stepping down.
1684 		 */
1685 		if (is_last_spte(iter.old_spte, iter.level))
1686 			continue;
1687 
1688 		/*
1689 		 * If iter.gfn resides outside of the slot, i.e. the page for
1690 		 * the current level overlaps but is not contained by the slot,
1691 		 * then the SPTE can't be made huge.  More importantly, trying
1692 		 * to query that info from slot->arch.lpage_info will cause an
1693 		 * out-of-bounds access.
1694 		 */
1695 		if (iter.gfn < start || iter.gfn >= end)
1696 			continue;
1697 
1698 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1699 							      iter.gfn, PG_LEVEL_NUM);
1700 		if (max_mapping_level < iter.level)
1701 			continue;
1702 
1703 		/* Note, a successful atomic zap also does a remote TLB flush. */
1704 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1705 			goto retry;
1706 	}
1707 
1708 	rcu_read_unlock();
1709 }
1710 
1711 /*
1712  * Zap non-leaf SPTEs (and free their associated page tables) which could
1713  * be replaced by huge pages, for GFNs within the slot.
1714  */
1715 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1716 				       const struct kvm_memory_slot *slot)
1717 {
1718 	struct kvm_mmu_page *root;
1719 
1720 	lockdep_assert_held_read(&kvm->mmu_lock);
1721 
1722 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1723 		zap_collapsible_spte_range(kvm, root, slot);
1724 }
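
/*
 * Note: zapping the non-leaf SPTEs above does not itself install huge pages;
 * it only frees page tables that could be replaced by huge mappings so that
 * subsequent faults can re-create them.  This is typically done once dirty
 * logging is disabled on the slot, with mmu_lock held for read.
 */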
1725 
1726 /*
1727  * Removes write access on the last level SPTE mapping this GFN and unsets the
1728  * MMU-writable bit to ensure future writes continue to be intercepted.
1729  * Returns true if an SPTE was set and a TLB flush is needed.
1730  */
1731 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1732 			      gfn_t gfn, int min_level)
1733 {
1734 	struct tdp_iter iter;
1735 	u64 new_spte;
1736 	bool spte_set = false;
1737 
1738 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1739 
1740 	rcu_read_lock();
1741 
1742 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1743 		if (!is_shadow_present_pte(iter.old_spte) ||
1744 		    !is_last_spte(iter.old_spte, iter.level))
1745 			continue;
1746 
1747 		new_spte = iter.old_spte &
1748 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1749 
1750 		if (new_spte == iter.old_spte)
1751 			break;
1752 
1753 		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1754 		spte_set = true;
1755 	}
1756 
1757 	rcu_read_unlock();
1758 
1759 	return spte_set;
1760 }
1761 
1762 /*
1763  * Removes write access on the last level SPTE mapping this GFN and unsets the
1764  * MMU-writable bit to ensure future writes continue to be intercepted.
1765  * Returns true if an SPTE was set and a TLB flush is needed.
1766  */
1767 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1768 				   struct kvm_memory_slot *slot, gfn_t gfn,
1769 				   int min_level)
1770 {
1771 	struct kvm_mmu_page *root;
1772 	bool spte_set = false;
1773 
1774 	lockdep_assert_held_write(&kvm->mmu_lock);
1775 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1776 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1777 
1778 	return spte_set;
1779 }
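
/*
 * Illustrative sketch, not part of the upstream file: a caller that needs a
 * single GFN write-protected would do so under the write-mode mmu_lock and
 * flush TLBs if anything changed, e.g. roughly:
 *
 *	write_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K);
 *	write_unlock(&kvm->mmu_lock);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 */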
1780 
1781 /*
1782  * Return the level of the lowest level SPTE added to sptes.
1783  * That SPTE may be non-present.
1784  *
1785  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1786  */
1787 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1788 			 int *root_level)
1789 {
1790 	struct tdp_iter iter;
1791 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1792 	gfn_t gfn = addr >> PAGE_SHIFT;
1793 	int leaf = -1;
1794 
1795 	*root_level = vcpu->arch.mmu->root_role.level;
1796 
1797 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1798 		leaf = iter.level;
1799 		sptes[leaf] = iter.old_spte;
1800 	}
1801 
1802 	return leaf;
1803 }
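
/*
 * Illustrative sketch, not part of the upstream file: per the contract above,
 * a lockless walk brackets the call with the
 * kvm_tdp_mmu_walk_lockless_{begin,end} helpers, e.g. roughly:
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */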
1804 
1805 /*
1806  * Returns the last level spte pointer of the shadow page walk for the given
1807  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1808  * walk could be performed, returns NULL and *spte does not contain valid data.
1809  *
1810  * Contract:
1811  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1812  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1813  *
1814  * WARNING: This function is only intended to be called during fast_page_fault.
1815  */
1816 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1817 					u64 *spte)
1818 {
1819 	struct tdp_iter iter;
1820 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1821 	gfn_t gfn = addr >> PAGE_SHIFT;
1822 	tdp_ptep_t sptep = NULL;
1823 
1824 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1825 		*spte = iter.old_spte;
1826 		sptep = iter.sptep;
1827 	}
1828 
1829 	/*
1830 	 * Perform the rcu_dereference to get the raw spte pointer value since
1831 	 * we are passing it up to fast_page_fault, which is shared with the
1832 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1833 	 * annotation.
1834 	 *
1835 	 * This is safe since fast_page_fault obeys the contracts of this
1836 	 * function as well as all TDP MMU contracts around modifying SPTEs
1837 	 * outside of mmu_lock.
1838 	 */
1839 	return rcu_dereference(sptep);
1840 }
1841