xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 657c45b3)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include "mmu.h"
5 #include "mmu_internal.h"
6 #include "mmutrace.h"
7 #include "tdp_iter.h"
8 #include "tdp_mmu.h"
9 #include "spte.h"
10 
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
13 
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
16 {
17 	struct workqueue_struct *wq;
18 
19 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
20 	if (!wq)
21 		return -ENOMEM;
22 
23 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
24 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
25 	kvm->arch.tdp_mmu_zap_wq = wq;
26 	return 1;
27 }
28 
29 /* Arbitrarily returns true so that this may be used in if statements. */
30 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
31 							     bool shared)
32 {
33 	if (shared)
34 		lockdep_assert_held_read(&kvm->mmu_lock);
35 	else
36 		lockdep_assert_held_write(&kvm->mmu_lock);
37 
38 	return true;
39 }
40 
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42 {
43 	/*
44 	 * Invalidate all roots, which, besides the obvious, schedules all roots
45 	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
46 	 * ultimately frees all roots.
47 	 */
48 	kvm_tdp_mmu_invalidate_all_roots(kvm);
49 
50 	/*
51 	 * Destroying a workqueue also first flushes the workqueue, i.e. no
52 	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
53 	 */
54 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
55 
56 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
57 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
58 
59 	/*
60 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
61 	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
62 	 * can call kvm_tdp_mmu_put_root and create new callbacks.
63 	 */
64 	rcu_barrier();
65 }
66 
67 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
68 {
69 	free_page((unsigned long)sp->spt);
70 	kmem_cache_free(mmu_page_header_cache, sp);
71 }
72 
73 /*
74  * This is called through call_rcu in order to free TDP page table memory
75  * safely with respect to other kernel threads that may be operating on
76  * the memory.
77  * By only accessing TDP MMU page table memory in an RCU read critical
78  * section, and freeing it after a grace period, lockless accessors are
79  * guaranteed not to use that memory after it has been freed.
80  */
81 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
82 {
83 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
84 					       rcu_head);
85 
86 	tdp_mmu_free_sp(sp);
87 }
88 
89 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
90 			     bool shared);
91 
92 static void tdp_mmu_zap_root_work(struct work_struct *work)
93 {
94 	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
95 						 tdp_mmu_async_work);
96 	struct kvm *kvm = root->tdp_mmu_async_data;
97 
98 	read_lock(&kvm->mmu_lock);
99 
100 	/*
101 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
102 	 * allocating a new root (see kvm_mmu_load()), and when migrating a vCPU
103 	 * to a different pCPU.  Note, the local TLB flush on reuse also
104 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
105 	 * intermediate paging structures, that may be zapped, as such entries
106 	 * are associated with the ASID on both VMX and SVM.
107 	 */
108 	tdp_mmu_zap_root(kvm, root, true);
109 
110 	/*
111 	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
112 	 * avoiding an infinite loop.  By design, the root is reachable while
113 	 * it's being asynchronously zapped, thus a different task can put its
114 	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
115 	 * asynchronously zapped root is unavoidable.
116 	 */
117 	kvm_tdp_mmu_put_root(kvm, root, true);
118 
119 	read_unlock(&kvm->mmu_lock);
120 }
121 
122 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
123 {
124 	root->tdp_mmu_async_data = kvm;
125 	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
126 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
127 }
128 
129 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
130 			  bool shared)
131 {
132 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
133 
134 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
135 		return;
136 
137 	/*
138 	 * The TDP MMU itself holds a reference to each root until the root is
139 	 * explicitly invalidated, i.e. the final reference should never be
140 	 * put for a valid root.
141 	 */
142 	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
143 
144 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
145 	list_del_rcu(&root->link);
146 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
147 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
148 }
149 
150 /*
151  * Returns the next root after @prev_root (or the first root if @prev_root is
152  * NULL).  A reference to the returned root is acquired, and the reference to
153  * @prev_root is released (the caller obviously must hold a reference to
154  * @prev_root if it's non-NULL).
155  *
156  * If @only_valid is true, invalid roots are skipped.
157  *
158  * Returns NULL if the end of tdp_mmu_roots was reached.
159  */
160 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
161 					      struct kvm_mmu_page *prev_root,
162 					      bool shared, bool only_valid)
163 {
164 	struct kvm_mmu_page *next_root;
165 
166 	rcu_read_lock();
167 
168 	if (prev_root)
169 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
170 						  &prev_root->link,
171 						  typeof(*prev_root), link);
172 	else
173 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
174 						   typeof(*next_root), link);
175 
176 	while (next_root) {
177 		if ((!only_valid || !next_root->role.invalid) &&
178 		    kvm_tdp_mmu_get_root(next_root))
179 			break;
180 
181 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
182 				&next_root->link, typeof(*next_root), link);
183 	}
184 
185 	rcu_read_unlock();
186 
187 	if (prev_root)
188 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
189 
190 	return next_root;
191 }
192 
193 /*
194  * Note: this iterator gets and puts references to the roots it iterates over.
195  * This makes it safe to release the MMU lock and yield within the loop, but
196  * if exiting the loop early, the caller must drop the reference to the most
197  * recent root. (Unless keeping a live reference is desirable.)
198  *
199  * If shared is set, this function is operating under the MMU lock in read
200  * mode. In the unlikely event that this thread must free a root, the lock
201  * will be temporarily dropped and reacquired in write mode.
202  */
203 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
204 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
205 	     _root;								\
206 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
207 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
208 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
209 		} else
210 
211 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
212 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
213 
214 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
215 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
216 
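/*
 * Example usage of the yield-safe root walk (an illustrative sketch that
 * mirrors kvm_tdp_mmu_zap_leafs() further down in this file): walk every
 * root for a given address space ID, zap leaf SPTEs, and accumulate whether
 * a TLB flush is needed:
 *
 *	struct kvm_mmu_page *root;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 */
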
217 /*
218  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
219  * the implication being that any flow that holds mmu_lock for read is
220  * inherently yield-friendly and should use the yield-safe variant above.
221  * Holding mmu_lock for write obviates the need for RCU protection as the list
222  * is guaranteed to be stable.
223  */
224 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
225 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
226 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
227 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
228 		} else
229 
230 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
231 {
232 	struct kvm_mmu_page *sp;
233 
234 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
235 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
236 
237 	return sp;
238 }
239 
240 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
241 			    gfn_t gfn, union kvm_mmu_page_role role)
242 {
243 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
244 
245 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
246 
247 	sp->role = role;
248 	sp->gfn = gfn;
249 	sp->ptep = sptep;
250 	sp->tdp_mmu_page = true;
251 
252 	trace_kvm_mmu_get_page(sp, true);
253 }
254 
255 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
256 				  struct tdp_iter *iter)
257 {
258 	struct kvm_mmu_page *parent_sp;
259 	union kvm_mmu_page_role role;
260 
261 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
262 
263 	role = parent_sp->role;
264 	role.level--;
265 
266 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
267 }
268 
269 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
270 {
271 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
272 	struct kvm *kvm = vcpu->kvm;
273 	struct kvm_mmu_page *root;
274 
275 	lockdep_assert_held_write(&kvm->mmu_lock);
276 
277 	/*
278 	 * Check for an existing root before allocating a new one.  Note, the
279 	 * role check prevents consuming an invalid root.
280 	 */
281 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
282 		if (root->role.word == role.word &&
283 		    kvm_tdp_mmu_get_root(root))
284 			goto out;
285 	}
286 
287 	root = tdp_mmu_alloc_sp(vcpu);
288 	tdp_mmu_init_sp(root, NULL, 0, role);
289 
290 	/*
291 	 * TDP MMU roots are kept until they are explicitly invalidated, either
292 	 * by a memslot update or by the destruction of the VM.  Initialize the
293 	 * refcount to two; one reference for the vCPU, and one reference for
294 	 * the TDP MMU itself, which is held until the root is invalidated and
295 	 * is ultimately put by tdp_mmu_zap_root_work().
296 	 */
297 	refcount_set(&root->tdp_mmu_root_count, 2);
298 
299 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
300 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
301 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
302 
303 out:
304 	return __pa(root->spt);
305 }
306 
307 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
308 				u64 old_spte, u64 new_spte, int level,
309 				bool shared);
310 
311 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
312 {
313 	kvm_account_pgtable_pages((void *)sp->spt, +1);
314 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
315 }
316 
317 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
318 {
319 	kvm_account_pgtable_pages((void *)sp->spt, -1);
320 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
321 }
322 
323 /**
324  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
325  *
326  * @kvm: kvm instance
327  * @sp: the page to be removed
328  * @shared: This operation may not be running under the exclusive use of
329  *	    the MMU lock and the operation must synchronize with other
330  *	    threads that might be adding or removing pages.
331  */
332 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
333 			      bool shared)
334 {
335 	tdp_unaccount_mmu_page(kvm, sp);
336 
337 	if (!sp->nx_huge_page_disallowed)
338 		return;
339 
340 	if (shared)
341 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
342 	else
343 		lockdep_assert_held_write(&kvm->mmu_lock);
344 
345 	sp->nx_huge_page_disallowed = false;
346 	untrack_possible_nx_huge_page(kvm, sp);
347 
348 	if (shared)
349 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
350 }
351 
352 /**
353  * handle_removed_pt() - handle a page table removed from the TDP structure
354  *
355  * @kvm: kvm instance
356  * @pt: the page removed from the paging structure
357  * @shared: This operation may not be running under the exclusive use
358  *	    of the MMU lock and the operation must synchronize with other
359  *	    threads that might be modifying SPTEs.
360  *
361  * Given a page table that has been removed from the TDP paging structure,
362  * iterates through the page table to clear SPTEs and free child page tables.
363  *
364  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
365  * protection. Since this thread removed it from the paging structure,
366  * this thread will be responsible for ensuring the page is freed. Hence the
367  * early rcu_dereferences in the function.
368  */
369 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
370 {
371 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
372 	int level = sp->role.level;
373 	gfn_t base_gfn = sp->gfn;
374 	int i;
375 
376 	trace_kvm_mmu_prepare_zap_page(sp);
377 
378 	tdp_mmu_unlink_sp(kvm, sp, shared);
379 
380 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
381 		tdp_ptep_t sptep = pt + i;
382 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
383 		u64 old_spte;
384 
385 		if (shared) {
386 			/*
387 			 * Set the SPTE to a nonpresent value that other
388 			 * threads will not overwrite. If the SPTE was
389 			 * already marked as removed then another thread
390 			 * handling a page fault could overwrite it, so
391 			 * keep retrying until the SPTE is set to the removed
392 			 * value from some other, non-removed value.
393 			 */
394 			for (;;) {
395 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
396 				if (!is_removed_spte(old_spte))
397 					break;
398 				cpu_relax();
399 			}
400 		} else {
401 			/*
402 			 * If the SPTE is not MMU-present, there is no backing
403 			 * page associated with the SPTE and so no side effects
404 			 * that need to be recorded, and exclusive ownership of
405 			 * mmu_lock ensures the SPTE can't be made present.
406 			 * Note, zapping MMIO SPTEs is also unnecessary as they
407 			 * are guarded by the memslots generation, not by being
408 			 * unreachable.
409 			 */
410 			old_spte = kvm_tdp_mmu_read_spte(sptep);
411 			if (!is_shadow_present_pte(old_spte))
412 				continue;
413 
414 			/*
415 			 * Use the common helper instead of a raw WRITE_ONCE as
416 			 * the SPTE needs to be updated atomically if it can be
417 			 * modified by a different vCPU outside of mmu_lock.
418 			 * Even though the parent SPTE is !PRESENT, the TLB
419 			 * hasn't yet been flushed, and both Intel and AMD
420 			 * document that A/D assists can use upper-level PxE
421 			 * entries that are cached in the TLB, i.e. the CPU can
422 			 * still access the page and mark it dirty.
423 			 *
424 			 * No retry is needed in the atomic update path as the
425 			 * sole concern is dropping a Dirty bit, i.e. no other
426 			 * task can zap/remove the SPTE as mmu_lock is held for
427 			 * write.  Marking the SPTE as a removed SPTE is not
428 			 * strictly necessary for the same reason, but using
429 			 * the removed SPTE value keeps the shared/exclusive
430 			 * paths consistent and allows the handle_changed_spte()
431 			 * call below to hardcode the new value to REMOVED_SPTE.
432 			 *
433 			 * Note, even though dropping a Dirty bit is the only
434 			 * scenario where a non-atomic update could result in a
435 			 * functional bug, simply checking the Dirty bit isn't
436 			 * sufficient as a fast page fault could read the upper
437 			 * level SPTE before it is zapped, and then make this
438 			 * target SPTE writable, resume the guest, and set the
439 			 * Dirty bit between reading the SPTE above and writing
440 			 * it here.
441 			 */
442 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
443 							  REMOVED_SPTE, level);
444 		}
445 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
446 				    old_spte, REMOVED_SPTE, level, shared);
447 	}
448 
449 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
450 }
451 
452 /**
453  * handle_changed_spte - handle bookkeeping associated with an SPTE change
454  * @kvm: kvm instance
455  * @as_id: the address space of the paging structure the SPTE was a part of
456  * @gfn: the base GFN that was mapped by the SPTE
457  * @old_spte: The value of the SPTE before the change
458  * @new_spte: The value of the SPTE after the change
459  * @level: the level of the PT the SPTE is part of in the paging structure
460  * @shared: This operation may not be running under the exclusive use of
461  *	    the MMU lock and the operation must synchronize with other
462  *	    threads that might be modifying SPTEs.
463  *
464  * Handle bookkeeping that might result from the modification of a SPTE.  Note,
465  * dirty logging updates are handled in common code, not here (see make_spte()
466  * and fast_pf_fix_direct_spte()).
467  */
468 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
469 				u64 old_spte, u64 new_spte, int level,
470 				bool shared)
471 {
472 	bool was_present = is_shadow_present_pte(old_spte);
473 	bool is_present = is_shadow_present_pte(new_spte);
474 	bool was_leaf = was_present && is_last_spte(old_spte, level);
475 	bool is_leaf = is_present && is_last_spte(new_spte, level);
476 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
477 
478 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
479 	WARN_ON(level < PG_LEVEL_4K);
480 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
481 
482 	/*
483 	 * If this warning were to trigger it would indicate that there was a
484 	 * missing MMU notifier or a race with some notifier handler.
485 	 * A present, leaf SPTE should never be directly replaced with another
486 	 * present leaf SPTE pointing to a different PFN. A notifier handler
487 	 * should be zapping the SPTE before the main MM's page table is
488 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
489 	 * thread before replacement.
490 	 */
491 	if (was_leaf && is_leaf && pfn_changed) {
492 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
493 		       "SPTE with another present leaf SPTE mapping a\n"
494 		       "different PFN!\n"
495 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
496 		       as_id, gfn, old_spte, new_spte, level);
497 
498 		/*
499 		 * Crash the host to prevent error propagation and guest data
500 		 * corruption.
501 		 */
502 		BUG();
503 	}
504 
505 	if (old_spte == new_spte)
506 		return;
507 
508 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
509 
510 	if (is_leaf)
511 		check_spte_writable_invariants(new_spte);
512 
513 	/*
514 	 * The only times a SPTE should be changed from a non-present to
515 	 * The only time a SPTE should be changed from a non-present to
516 	 * removed. In that case, there is nothing to do here.
517 	 */
518 	if (!was_present && !is_present) {
519 		/*
520 		 * If this change does not involve a MMIO SPTE or removed SPTE,
521 		 * it is unexpected. Log the change, though it should not
522 		 * impact the guest since both the former and current SPTEs
523 		 * are nonpresent.
524 		 */
525 		if (WARN_ON(!is_mmio_spte(old_spte) &&
526 			    !is_mmio_spte(new_spte) &&
527 			    !is_removed_spte(new_spte)))
528 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
529 			       "should not be replaced with another,\n"
530 			       "different nonpresent SPTE, unless one or both\n"
531 			       "are MMIO SPTEs, or the new SPTE is\n"
532 			       "a temporary removed SPTE.\n"
533 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
534 			       as_id, gfn, old_spte, new_spte, level);
535 		return;
536 	}
537 
538 	if (is_leaf != was_leaf)
539 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
540 
541 	if (was_leaf && is_dirty_spte(old_spte) &&
542 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
543 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
544 
545 	/*
546 	 * Recursively handle child PTs if the change removed a subtree from
547 	 * the paging structure.  Note the WARN on the PFN changing without the
548 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
549 	 * pages are kernel allocations and should never be migrated.
550 	 */
551 	if (was_present && !was_leaf &&
552 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
553 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
554 
555 	if (was_leaf && is_accessed_spte(old_spte) &&
556 	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
557 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
558 }
559 
560 /*
561  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
562  * and handle the associated bookkeeping.  Do not mark the page dirty
563  * in KVM's dirty bitmaps.
564  *
565  * If setting the SPTE fails because it has changed, iter->old_spte will be
566  * refreshed to the current value of the spte.
567  *
568  * @kvm: kvm instance
569  * @iter: a tdp_iter instance currently on the SPTE that should be set
570  * @new_spte: The value the SPTE should be set to
571  * Return:
572  * * 0      - If the SPTE was set.
573  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
574  *            no side-effects other than setting iter->old_spte to the last
575  *            known value of the spte.
576  */
577 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
578 					  struct tdp_iter *iter,
579 					  u64 new_spte)
580 {
581 	u64 *sptep = rcu_dereference(iter->sptep);
582 
583 	/*
584 	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
585 	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
586 	 * and pre-checking before inserting a new SPTE is advantageous as it
587 	 * avoids unnecessary work.
588 	 */
589 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
590 
591 	lockdep_assert_held_read(&kvm->mmu_lock);
592 
593 	/*
594 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
595 	 * does not hold the mmu_lock.  On failure, i.e. if a different logical
596 	 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
597 	 * the current value, so the caller operates on fresh data, e.g. if it
598 	 * retries tdp_mmu_set_spte_atomic().
599 	 */
600 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
601 		return -EBUSY;
602 
603 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
604 			    new_spte, iter->level, true);
605 
606 	return 0;
607 }
608 
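/*
 * Example of the -EBUSY handling (an illustrative sketch mirroring
 * wrprot_gfn_range() below): on failure, iter.old_spte has already been
 * refreshed, so the caller simply recomputes the new value and retries:
 *
 *	retry:
 *		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
 *		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *			goto retry;
 */
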
609 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
610 					  struct tdp_iter *iter)
611 {
612 	int ret;
613 
614 	/*
615 	 * Freeze the SPTE by setting it to a special,
616 	 * non-present value. This will stop other threads from
617 	 * immediately installing a present entry in its place
618 	 * before the TLBs are flushed.
619 	 */
620 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
621 	if (ret)
622 		return ret;
623 
624 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
625 
626 	/*
627 	 * No other thread can overwrite the removed SPTE as they must either
628 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
629 	 * overwrite the special removed SPTE value. No bookkeeping is needed
630 	 * here since the SPTE is going from non-present to non-present.  Use
631 	 * the raw write helper to avoid an unnecessary check on volatile bits.
632 	 */
633 	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
634 
635 	return 0;
636 }
637 
638 
639 /*
640  * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
641  * @kvm:	      KVM instance
642  * @as_id:	      Address space ID, i.e. regular vs. SMM
643  * @sptep:	      Pointer to the SPTE
644  * @old_spte:	      The current value of the SPTE
645  * @new_spte:	      The new value that will be set for the SPTE
646  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
647  * @level:	      The level _containing_ the SPTE (its parent PT's level)
648  *
649  * Returns the old SPTE value, which _may_ be different than @old_spte if the
650  * SPTE had volatile bits.
651  */
652 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
653 			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
654 {
655 	lockdep_assert_held_write(&kvm->mmu_lock);
656 
657 	/*
658 	 * No thread should be using this function to set SPTEs to or from the
659 	 * temporary removed SPTE value.
660 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
661 	 * should be used. If operating under the MMU lock in write mode, the
662 	 * use of the removed SPTE should not be necessary.
663 	 */
664 	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
665 
666 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
667 
668 	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
669 	return old_spte;
670 }
671 
672 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
673 					 u64 new_spte)
674 {
675 	WARN_ON_ONCE(iter->yielded);
676 	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
677 					  iter->old_spte, new_spte,
678 					  iter->gfn, iter->level);
679 }
680 
681 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
682 	for_each_tdp_pte(_iter, _root, _start, _end)
683 
684 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
685 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
686 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
687 		    !is_last_spte(_iter.old_spte, _iter.level))		\
688 			continue;					\
689 		else
690 
691 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
692 	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
693 
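/*
 * Example usage of the leaf iterator (an illustrative sketch mirroring
 * kvm_tdp_mmu_handle_gfn() below): visit only present, last-level SPTEs in
 * a GFN range while under RCU protection:
 *
 *	rcu_read_lock();
 *	tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
 *		ret |= handler(kvm, &iter, range);
 *	rcu_read_unlock();
 */
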
694 /*
695  * Yield if the MMU lock is contended or this thread needs to return control
696  * to the scheduler.
697  *
698  * If this function should yield and flush is set, it will perform a remote
699  * TLB flush before yielding.
700  *
701  * If this function yields, iter->yielded is set and the caller must skip to
702  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
703  * over the paging structures to allow the iterator to continue its traversal
704  * from the paging structure root.
705  *
706  * Returns true if this function yielded.
707  */
708 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
709 							  struct tdp_iter *iter,
710 							  bool flush, bool shared)
711 {
712 	WARN_ON(iter->yielded);
713 
714 	/* Ensure forward progress has been made before yielding. */
715 	if (iter->next_last_level_gfn == iter->yielded_gfn)
716 		return false;
717 
718 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
719 		if (flush)
720 			kvm_flush_remote_tlbs(kvm);
721 
722 		rcu_read_unlock();
723 
724 		if (shared)
725 			cond_resched_rwlock_read(&kvm->mmu_lock);
726 		else
727 			cond_resched_rwlock_write(&kvm->mmu_lock);
728 
729 		rcu_read_lock();
730 
731 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
732 
733 		iter->yielded = true;
734 	}
735 
736 	return iter->yielded;
737 }
738 
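/*
 * Example of the yield pattern (an illustrative sketch mirroring
 * tdp_mmu_zap_leafs() below): when this helper yields, the caller skips to
 * the next iteration and tdp_iter_next() restarts the walk from the root:
 *
 *	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 *			flush = false;
 *			continue;
 *		}
 *		...
 *	}
 */
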
739 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
740 {
741 	/*
742 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
743 	 * a gpa range that would exceed the max gfn, and KVM does not create
744 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
745 	 * the slow emulation path every time.
746 	 */
747 	return kvm_mmu_max_gfn() + 1;
748 }
749 
750 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
751 			       bool shared, int zap_level)
752 {
753 	struct tdp_iter iter;
754 
755 	gfn_t end = tdp_mmu_max_gfn_exclusive();
756 	gfn_t start = 0;
757 
758 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
759 retry:
760 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
761 			continue;
762 
763 		if (!is_shadow_present_pte(iter.old_spte))
764 			continue;
765 
766 		if (iter.level > zap_level)
767 			continue;
768 
769 		if (!shared)
770 			tdp_mmu_iter_set_spte(kvm, &iter, 0);
771 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
772 			goto retry;
773 	}
774 }
775 
776 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
777 			     bool shared)
778 {
779 
780 	/*
781 	 * The root must have an elevated refcount so that it's reachable via
782 	 * mmu_notifier callbacks, which allows this path to yield and drop
783 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
784 	 * must drop all references to relevant pages prior to completing the
785 	 * callback.  Dropping mmu_lock with an unreachable root would result
786 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
787 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
788 	 * dirty accessed bits to the SPTE's associated struct page.
789 	 */
790 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
791 
792 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
793 
794 	rcu_read_lock();
795 
796 	/*
797 	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
798 	 * split the zap into two passes.  On the first pass, zap at the 1gb
799 	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
800 	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
801 	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
802 	 *
803 	 * Because zapping a SP recurses on its children, stepping down to
804 	 * PG_LEVEL_4K in the iterator itself is unnecessary.
805 	 */
806 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
807 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
808 
809 	rcu_read_unlock();
810 }
811 
812 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
813 {
814 	u64 old_spte;
815 
816 	/*
817 	 * This helper intentionally doesn't allow zapping a root shadow page,
818 	 * which doesn't have a parent page table and thus no associated entry.
819 	 */
820 	if (WARN_ON_ONCE(!sp->ptep))
821 		return false;
822 
823 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
824 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
825 		return false;
826 
827 	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
828 			 sp->gfn, sp->role.level + 1);
829 
830 	return true;
831 }
832 
833 /*
834  * If can_yield is true, this function will release the MMU lock and reschedule if the
835  * scheduler needs the CPU or there is contention on the MMU lock. If this
836  * function cannot yield, it will not release the MMU lock or reschedule and
837  * the caller must ensure it does not supply too large a GFN range, or the
838  * operation can cause a soft lockup.
839  */
840 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
841 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
842 {
843 	struct tdp_iter iter;
844 
845 	end = min(end, tdp_mmu_max_gfn_exclusive());
846 
847 	lockdep_assert_held_write(&kvm->mmu_lock);
848 
849 	rcu_read_lock();
850 
851 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
852 		if (can_yield &&
853 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
854 			flush = false;
855 			continue;
856 		}
857 
858 		if (!is_shadow_present_pte(iter.old_spte) ||
859 		    !is_last_spte(iter.old_spte, iter.level))
860 			continue;
861 
862 		tdp_mmu_iter_set_spte(kvm, &iter, 0);
863 		flush = true;
864 	}
865 
866 	rcu_read_unlock();
867 
868 	/*
869 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
870 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
871 	 */
872 	return flush;
873 }
874 
875 /*
876  * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
877  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
878  * more SPTEs were zapped since the MMU lock was last acquired.
879  */
880 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
881 			   bool can_yield, bool flush)
882 {
883 	struct kvm_mmu_page *root;
884 
885 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
886 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
887 
888 	return flush;
889 }
890 
891 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
892 {
893 	struct kvm_mmu_page *root;
894 	int i;
895 
896 	/*
897 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
898 	 * before returning to the caller.  Zap directly even if the root is
899 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
900 	 * all that expensive and mmu_lock is already held, which means the
901 	 * worker has yielded, i.e. flushing the work instead of zapping here
902 	 * isn't guaranteed to be any faster.
903 	 *
904 	 * A TLB flush is unnecessary, as KVM zaps everything if and only if the VM
905 	 * is being destroyed or the userspace VMM has exited.  In both cases,
906 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
907 	 */
908 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
909 		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
910 			tdp_mmu_zap_root(kvm, root, false);
911 	}
912 }
913 
914 /*
915  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
916  * zap" completes.
917  */
918 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
919 {
920 	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
921 }
922 
923 /*
924  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
925  * is about to be zapped, e.g. in response to a memslots update.  The actual
926  * zapping is performed asynchronously.  Using a separate workqueue makes it
927  * easy to ensure that the destruction is performed before the "fast zap"
928  * completes, without keeping a separate list of invalidated roots; the list is
929  * effectively the list of work items in the workqueue.
930  *
931  * Note, the asynchronous worker is gifted the TDP MMU's reference.
932  * See kvm_tdp_mmu_get_vcpu_root_hpa().
933  */
934 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
935 {
936 	struct kvm_mmu_page *root;
937 
938 	/*
939 	 * mmu_lock must be held for write to ensure that a root doesn't become
940 	 * invalid while there are active readers (invalidating a root while
941 	 * there are active readers may or may not be problematic in practice,
942 	 * but it's uncharted territory and not supported).
943 	 *
944 	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
945 	 * being destroyed after all references have been put, or if no vCPUs
946 	 * have been created (which means there are no roots), i.e. the VM is
947 	 * being destroyed in an error path of KVM_CREATE_VM.
948 	 */
949 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
950 	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
951 		lockdep_assert_held_write(&kvm->mmu_lock);
952 
953 	/*
954 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
955 	 * be other references to @kvm, i.e. nothing else can invalidate roots
956 	 * or be consuming roots, but walking the list of roots does need to be
957 	 * guarded against roots being deleted by the asynchronous zap worker.
958 	 */
959 	rcu_read_lock();
960 
961 	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
962 		if (!root->role.invalid) {
963 			root->role.invalid = true;
964 			tdp_mmu_schedule_zap_root(kvm, root);
965 		}
966 	}
967 
968 	rcu_read_unlock();
969 }
970 
971 /*
972  * Installs a last-level SPTE to handle a TDP page fault.
973  * (NPT/EPT violation/misconfiguration)
974  */
975 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
976 					  struct kvm_page_fault *fault,
977 					  struct tdp_iter *iter)
978 {
979 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
980 	u64 new_spte;
981 	int ret = RET_PF_FIXED;
982 	bool wrprot = false;
983 
984 	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
985 		return RET_PF_RETRY;
986 
987 	if (unlikely(!fault->slot))
988 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
989 	else
990 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
991 					 fault->pfn, iter->old_spte, fault->prefetch, true,
992 					 fault->map_writable, &new_spte);
993 
994 	if (new_spte == iter->old_spte)
995 		ret = RET_PF_SPURIOUS;
996 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
997 		return RET_PF_RETRY;
998 	else if (is_shadow_present_pte(iter->old_spte) &&
999 		 !is_last_spte(iter->old_spte, iter->level))
1000 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1001 
1002 	/*
1003 	 * If the page fault was caused by a write but the page is write
1004 	 * protected, emulation is needed. If the emulation was skipped,
1005 	 * the vCPU would have the same fault again.
1006 	 */
1007 	if (wrprot) {
1008 		if (fault->write)
1009 			ret = RET_PF_EMULATE;
1010 	}
1011 
1012 	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1013 	if (unlikely(is_mmio_spte(new_spte))) {
1014 		vcpu->stat.pf_mmio_spte_created++;
1015 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1016 				     new_spte);
1017 		ret = RET_PF_EMULATE;
1018 	} else {
1019 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1020 				       rcu_dereference(iter->sptep));
1021 	}
1022 
1023 	return ret;
1024 }
1025 
1026 /*
1027  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1028  * provided page table.
1029  *
1030  * @kvm: kvm instance
1031  * @iter: a tdp_iter instance currently on the SPTE that should be set
1032  * @sp: The new TDP page table to install.
1033  * @shared: This operation is running under the MMU lock in read mode.
1034  *
1035  * Returns: 0 if the new page table was installed. Non-0 if the page table
1036  *          could not be installed (e.g. the atomic compare-exchange failed).
1037  */
1038 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1039 			   struct kvm_mmu_page *sp, bool shared)
1040 {
1041 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1042 	int ret = 0;
1043 
1044 	if (shared) {
1045 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1046 		if (ret)
1047 			return ret;
1048 	} else {
1049 		tdp_mmu_iter_set_spte(kvm, iter, spte);
1050 	}
1051 
1052 	tdp_account_mmu_page(kvm, sp);
1053 
1054 	return 0;
1055 }
1056 
1057 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1058 				   struct kvm_mmu_page *sp, bool shared);
1059 
1060 /*
1061  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1062  * page tables and SPTEs to translate the faulting guest physical address.
1063  */
1064 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1065 {
1066 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1067 	struct kvm *kvm = vcpu->kvm;
1068 	struct tdp_iter iter;
1069 	struct kvm_mmu_page *sp;
1070 	int ret = RET_PF_RETRY;
1071 
1072 	kvm_mmu_hugepage_adjust(vcpu, fault);
1073 
1074 	trace_kvm_mmu_spte_requested(fault);
1075 
1076 	rcu_read_lock();
1077 
1078 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1079 		int r;
1080 
1081 		if (fault->nx_huge_page_workaround_enabled)
1082 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1083 
1084 		/*
1085 		 * If SPTE has been frozen by another thread, just give up and
1086 		 * retry, avoiding unnecessary page table allocation and freeing.
1087 		 */
1088 		if (is_removed_spte(iter.old_spte))
1089 			goto retry;
1090 
1091 		if (iter.level == fault->goal_level)
1092 			goto map_target_level;
1093 
1094 		/* Step down into the lower level page table if it exists. */
1095 		if (is_shadow_present_pte(iter.old_spte) &&
1096 		    !is_large_pte(iter.old_spte))
1097 			continue;
1098 
1099 		/*
1100 		 * The SPTE is either non-present or points to a huge page that
1101 		 * needs to be split.
1102 		 */
1103 		sp = tdp_mmu_alloc_sp(vcpu);
1104 		tdp_mmu_init_child_sp(sp, &iter);
1105 
1106 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1107 
1108 		if (is_shadow_present_pte(iter.old_spte))
1109 			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1110 		else
1111 			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1112 
1113 		/*
1114 		 * Force the guest to retry if installing an upper level SPTE
1115 		 * failed, e.g. because a different task modified the SPTE.
1116 		 */
1117 		if (r) {
1118 			tdp_mmu_free_sp(sp);
1119 			goto retry;
1120 		}
1121 
1122 		if (fault->huge_page_disallowed &&
1123 		    fault->req_level >= iter.level) {
1124 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1125 			if (sp->nx_huge_page_disallowed)
1126 				track_possible_nx_huge_page(kvm, sp);
1127 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1128 		}
1129 	}
1130 
1131 	/*
1132 	 * The walk aborted before reaching the target level, e.g. because the
1133 	 * iterator detected an upper level SPTE was frozen during traversal.
1134 	 */
1135 	WARN_ON_ONCE(iter.level == fault->goal_level);
1136 	goto retry;
1137 
1138 map_target_level:
1139 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1140 
1141 retry:
1142 	rcu_read_unlock();
1143 	return ret;
1144 }
1145 
1146 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1147 				 bool flush)
1148 {
1149 	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
1150 				     range->end, range->may_block, flush);
1151 }
1152 
1153 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1154 			      struct kvm_gfn_range *range);
1155 
1156 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1157 						   struct kvm_gfn_range *range,
1158 						   tdp_handler_t handler)
1159 {
1160 	struct kvm_mmu_page *root;
1161 	struct tdp_iter iter;
1162 	bool ret = false;
1163 
1164 	/*
1165 	 * Don't support rescheduling; none of the MMU notifiers that funnel
1166 	 * into this helper allow blocking; it'd be dead, wasteful code.
1167 	 */
1168 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1169 		rcu_read_lock();
1170 
1171 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1172 			ret |= handler(kvm, &iter, range);
1173 
1174 		rcu_read_unlock();
1175 	}
1176 
1177 	return ret;
1178 }
1179 
1180 /*
1181  * Mark the SPTEs for the range of GFNs [start, end) unaccessed and return non-zero
1182  * if any of the GFNs in the range have been accessed.
1183  *
1184  * No need to mark the corresponding PFN as accessed as this call is coming
1185  * from the clear_young() or clear_flush_young() notifier, which uses the
1186  * return value to determine if the page has been accessed.
1187  */
1188 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1189 			  struct kvm_gfn_range *range)
1190 {
1191 	u64 new_spte;
1192 
1193 	/* If we have a non-accessed entry we don't need to change the pte. */
1194 	if (!is_accessed_spte(iter->old_spte))
1195 		return false;
1196 
1197 	if (spte_ad_enabled(iter->old_spte)) {
1198 		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1199 							 iter->old_spte,
1200 							 shadow_accessed_mask,
1201 							 iter->level);
1202 		new_spte = iter->old_spte & ~shadow_accessed_mask;
1203 	} else {
1204 		/*
1205 		 * Capture the dirty status of the page, so that it doesn't get
1206 		 * lost when the SPTE is marked for access tracking.
1207 		 */
1208 		if (is_writable_pte(iter->old_spte))
1209 			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1210 
1211 		new_spte = mark_spte_for_access_track(iter->old_spte);
1212 		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1213 							iter->old_spte, new_spte,
1214 							iter->level);
1215 	}
1216 
1217 	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1218 				       iter->old_spte, new_spte);
1219 	return true;
1220 }
1221 
1222 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1223 {
1224 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1225 }
1226 
1227 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1228 			 struct kvm_gfn_range *range)
1229 {
1230 	return is_accessed_spte(iter->old_spte);
1231 }
1232 
1233 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1234 {
1235 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1236 }
1237 
1238 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1239 			 struct kvm_gfn_range *range)
1240 {
1241 	u64 new_spte;
1242 
1243 	/* Huge pages aren't expected to be modified without first being zapped. */
1244 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
1245 
1246 	if (iter->level != PG_LEVEL_4K ||
1247 	    !is_shadow_present_pte(iter->old_spte))
1248 		return false;
1249 
1250 	/*
1251 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
1252 	 * zero the SPTE before setting the new PFN, but doing so preserves the
1253 	 * invariant that the PFN of a present leaf SPTE can never change.
1254 	 * See handle_changed_spte().
1255 	 */
1256 	tdp_mmu_iter_set_spte(kvm, iter, 0);
1257 
1258 	if (!pte_write(range->pte)) {
1259 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1260 								  pte_pfn(range->pte));
1261 
1262 		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1263 	}
1264 
1265 	return true;
1266 }
1267 
1268 /*
1269  * Handle the changed_pte MMU notifier for the TDP MMU.
1270  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
1271  * notifier.
1272  * Returns non-zero if a flush is needed before releasing the MMU lock.
1273  */
1274 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1275 {
1276 	/*
1277 	 * No need to handle the remote TLB flush under RCU protection, the
1278 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1279 	 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
1280 	 */
1281 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1282 }
1283 
1284 /*
1285  * Remove write access from all SPTEs at or above min_level that map GFNs
1286  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1287  * be flushed.
1288  */
1289 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1290 			     gfn_t start, gfn_t end, int min_level)
1291 {
1292 	struct tdp_iter iter;
1293 	u64 new_spte;
1294 	bool spte_set = false;
1295 
1296 	rcu_read_lock();
1297 
1298 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1299 
1300 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1301 retry:
1302 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1303 			continue;
1304 
1305 		if (!is_shadow_present_pte(iter.old_spte) ||
1306 		    !is_last_spte(iter.old_spte, iter.level) ||
1307 		    !(iter.old_spte & PT_WRITABLE_MASK))
1308 			continue;
1309 
1310 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1311 
1312 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1313 			goto retry;
1314 
1315 		spte_set = true;
1316 	}
1317 
1318 	rcu_read_unlock();
1319 	return spte_set;
1320 }
1321 
1322 /*
1323  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1324  * only affect leaf SPTEs down to min_level.
1325  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1326  */
1327 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1328 			     const struct kvm_memory_slot *slot, int min_level)
1329 {
1330 	struct kvm_mmu_page *root;
1331 	bool spte_set = false;
1332 
1333 	lockdep_assert_held_read(&kvm->mmu_lock);
1334 
1335 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1336 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1337 			     slot->base_gfn + slot->npages, min_level);
1338 
1339 	return spte_set;
1340 }
1341 
1342 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1343 {
1344 	struct kvm_mmu_page *sp;
1345 
1346 	gfp |= __GFP_ZERO;
1347 
1348 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1349 	if (!sp)
1350 		return NULL;
1351 
1352 	sp->spt = (void *)__get_free_page(gfp);
1353 	if (!sp->spt) {
1354 		kmem_cache_free(mmu_page_header_cache, sp);
1355 		return NULL;
1356 	}
1357 
1358 	return sp;
1359 }
1360 
1361 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1362 						       struct tdp_iter *iter,
1363 						       bool shared)
1364 {
1365 	struct kvm_mmu_page *sp;
1366 
1367 	/*
1368 	 * Since we are allocating while under the MMU lock, we have to be
1369 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1370 	 * reclaim and to avoid making any filesystem callbacks (which can end
1371 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1372 	 *
1373 	 * If this allocation fails we drop the lock and retry with reclaim
1374 	 * allowed.
1375 	 */
1376 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1377 	if (sp)
1378 		return sp;
1379 
1380 	rcu_read_unlock();
1381 
1382 	if (shared)
1383 		read_unlock(&kvm->mmu_lock);
1384 	else
1385 		write_unlock(&kvm->mmu_lock);
1386 
1387 	iter->yielded = true;
1388 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1389 
1390 	if (shared)
1391 		read_lock(&kvm->mmu_lock);
1392 	else
1393 		write_lock(&kvm->mmu_lock);
1394 
1395 	rcu_read_lock();
1396 
1397 	return sp;
1398 }
1399 
1400 /* Note, the caller is responsible for initializing @sp. */
1401 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1402 				   struct kvm_mmu_page *sp, bool shared)
1403 {
1404 	const u64 huge_spte = iter->old_spte;
1405 	const int level = iter->level;
1406 	int ret, i;
1407 
1408 	/*
1409 	 * No need for atomics when writing to sp->spt since the page table has
1410 	 * not been linked in yet and thus is not reachable from any other CPU.
1411 	 */
1412 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1413 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1414 
1415 	/*
1416 	 * Replace the huge spte with a pointer to the populated lower level
1417 	 * page table. Since we are making this change without a TLB flush, vCPUs
1418 	 * will see a mix of the split mappings and the original huge mapping,
1419 	 * depending on what's currently in their TLB. This is fine from a
1420 	 * correctness standpoint since the translation will be the same either
1421 	 * way.
1422 	 */
1423 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1424 	if (ret)
1425 		goto out;
1426 
1427 	/*
1428 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1429 	 * are overwriting from the page stats. But we have to manually update
1430 	 * the page stats with the new present child pages.
1431 	 */
1432 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1433 
1434 out:
1435 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1436 	return ret;
1437 }
1438 
1439 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1440 					 struct kvm_mmu_page *root,
1441 					 gfn_t start, gfn_t end,
1442 					 int target_level, bool shared)
1443 {
1444 	struct kvm_mmu_page *sp = NULL;
1445 	struct tdp_iter iter;
1446 	int ret = 0;
1447 
1448 	rcu_read_lock();
1449 
1450 	/*
1451 	 * Traverse the page table splitting all huge pages above the target
1452 	 * level into one lower level. For example, if we encounter a 1GB page
1453 	 * we split it into 512 2MB pages.
1454 	 *
1455 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1456 	 * to visit an SPTE before ever visiting its children, which means we
1457 	 * will correctly recursively split huge pages that are more than one
1458 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1459 	 * and then splitting each of those to 512 4KB pages).
1460 	 */
1461 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1462 retry:
1463 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1464 			continue;
1465 
1466 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1467 			continue;
1468 
1469 		if (!sp) {
1470 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1471 			if (!sp) {
1472 				ret = -ENOMEM;
1473 				trace_kvm_mmu_split_huge_page(iter.gfn,
1474 							      iter.old_spte,
1475 							      iter.level, ret);
1476 				break;
1477 			}
1478 
1479 			if (iter.yielded)
1480 				continue;
1481 		}
1482 
1483 		tdp_mmu_init_child_sp(sp, &iter);
1484 
1485 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1486 			goto retry;
1487 
1488 		sp = NULL;
1489 	}
1490 
1491 	rcu_read_unlock();
1492 
1493 	/*
1494 	 * It's possible to exit the loop having never used the last sp if, for
1495 	 * example, a vCPU doing HugePage NX splitting wins the race and
1496 	 * installs its own sp in place of the last sp we tried to split.
1497 	 */
1498 	if (sp)
1499 		tdp_mmu_free_sp(sp);
1500 
1501 	return ret;
1502 }
1503 
1504 
1505 /*
1506  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1507  */
1508 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1509 				      const struct kvm_memory_slot *slot,
1510 				      gfn_t start, gfn_t end,
1511 				      int target_level, bool shared)
1512 {
1513 	struct kvm_mmu_page *root;
1514 	int r = 0;
1515 
1516 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1517 
1518 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1519 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1520 		if (r) {
1521 			kvm_tdp_mmu_put_root(kvm, root, shared);
1522 			break;
1523 		}
1524 	}
1525 }
1526 
1527 /*
1528  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1529  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1530  * If AD bits are not enabled, this will require clearing the writable bit on
1531  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1532  * be flushed.
1533  */
1534 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1535 			   gfn_t start, gfn_t end)
1536 {
1537 	u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
1538 	struct tdp_iter iter;
1539 	bool spte_set = false;
1540 
1541 	rcu_read_lock();
1542 
1543 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
1544 retry:
1545 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1546 			continue;
1547 
1548 		if (!is_shadow_present_pte(iter.old_spte))
1549 			continue;
1550 
1551 		MMU_WARN_ON(kvm_ad_enabled() &&
1552 			    spte_ad_need_write_protect(iter.old_spte));
1553 
1554 		if (!(iter.old_spte & dbit))
1555 			continue;
1556 
1557 		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1558 			goto retry;
1559 
1560 		spte_set = true;
1561 	}
1562 
1563 	rcu_read_unlock();
1564 	return spte_set;
1565 }
1566 
1567 /*
1568  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1569  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1570  * If AD bits are not enabled, this will require clearing the writable bit on
1571  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1572  * be flushed.
1573  */
1574 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1575 				  const struct kvm_memory_slot *slot)
1576 {
1577 	struct kvm_mmu_page *root;
1578 	bool spte_set = false;
1579 
1580 	lockdep_assert_held_read(&kvm->mmu_lock);
1581 
1582 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1583 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1584 				slot->base_gfn + slot->npages);
1585 
1586 	return spte_set;
1587 }
1588 
1589 /*
1590  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1591  * set in mask, starting at gfn. The given memslot is expected to contain all
1592  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1593  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1594  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1595  */
1596 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1597 				  gfn_t gfn, unsigned long mask, bool wrprot)
1598 {
1599 	u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
1600 						   shadow_dirty_mask;
1601 	struct tdp_iter iter;
1602 
1603 	rcu_read_lock();
1604 
1605 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1606 				    gfn + BITS_PER_LONG) {
1607 		if (!mask)
1608 			break;
1609 
1610 		MMU_WARN_ON(kvm_ad_enabled() &&
1611 			    spte_ad_need_write_protect(iter.old_spte));
1612 
1613 		if (iter.level > PG_LEVEL_4K ||
1614 		    !(mask & (1UL << (iter.gfn - gfn))))
1615 			continue;
1616 
1617 		mask &= ~(1UL << (iter.gfn - gfn));
1618 
1619 		if (!(iter.old_spte & dbit))
1620 			continue;
1621 
1622 		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1623 							iter.old_spte, dbit,
1624 							iter.level);
1625 
1626 		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1627 					       iter.old_spte,
1628 					       iter.old_spte & ~dbit);
1629 		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1630 	}
1631 
1632 	rcu_read_unlock();
1633 }
1634 
1635 /*
1636  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1637  * set in mask, starting at gfn. The given memslot is expected to contain all
1638  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1639  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1640  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1641  */
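/*
 * Context (assumed typical usage, for illustration): mask is generally one
 * word of the memslot's dirty bitmap being harvested, so a single call covers
 * at most BITS_PER_LONG GFNs starting at gfn, matching the bounds used by
 * clear_dirty_pt_masked().
 */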
1642 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1643 				       struct kvm_memory_slot *slot,
1644 				       gfn_t gfn, unsigned long mask,
1645 				       bool wrprot)
1646 {
1647 	struct kvm_mmu_page *root;
1648 
1649 	lockdep_assert_held_write(&kvm->mmu_lock);
1650 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1651 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1652 }
1653 
1654 static void zap_collapsible_spte_range(struct kvm *kvm,
1655 				       struct kvm_mmu_page *root,
1656 				       const struct kvm_memory_slot *slot)
1657 {
1658 	gfn_t start = slot->base_gfn;
1659 	gfn_t end = start + slot->npages;
1660 	struct tdp_iter iter;
1661 	int max_mapping_level;
1662 
1663 	rcu_read_lock();
1664 
1665 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1666 retry:
1667 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1668 			continue;
1669 
1670 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1671 		    !is_shadow_present_pte(iter.old_spte))
1672 			continue;
1673 
1674 		/*
1675 		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1676 		 * a large page size, then its parent would have been zapped
1677 		 * instead of stepping down.
1678 		 */
1679 		if (is_last_spte(iter.old_spte, iter.level))
1680 			continue;
1681 
1682 		/*
1683 		 * If iter.gfn resides outside of the slot, i.e. the page for
1684 		 * the current level overlaps but is not contained by the slot,
1685 		 * then the SPTE can't be made huge.  More importantly, trying
1686 		 * to query that info from slot->arch.lpage_info will cause an
1687 		 * out-of-bounds access.
1688 		 */
1689 		if (iter.gfn < start || iter.gfn >= end)
1690 			continue;
1691 
1692 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1693 							      iter.gfn, PG_LEVEL_NUM);
1694 		if (max_mapping_level < iter.level)
1695 			continue;
1696 
1697 		/* Note, a successful atomic zap also does a remote TLB flush. */
1698 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1699 			goto retry;
1700 	}
1701 
1702 	rcu_read_unlock();
1703 }
1704 
1705 /*
1706  * Zap non-leaf SPTEs (and free their associated page tables) which could
1707  * be replaced by huge pages, for GFNs within the slot.
1708  */
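/*
 * Note (context, not a contract): zapping is sufficient because the next
 * fault on an affected GFN takes the normal fault path, which is then free to
 * install a huge mapping, e.g. after dirty logging has been disabled on the
 * slot.
 */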
1709 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1710 				       const struct kvm_memory_slot *slot)
1711 {
1712 	struct kvm_mmu_page *root;
1713 
1714 	lockdep_assert_held_read(&kvm->mmu_lock);
1715 
1716 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1717 		zap_collapsible_spte_range(kvm, root, slot);
1718 }
1719 
1720 /*
1721  * Removes write access on the last level SPTE mapping this GFN and unsets the
1722  * MMU-writable bit to ensure future writes continue to be intercepted.
1723  * Returns true if an SPTE was set and a TLB flush is needed.
1724  */
1725 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1726 			      gfn_t gfn, int min_level)
1727 {
1728 	struct tdp_iter iter;
1729 	u64 new_spte;
1730 	bool spte_set = false;
1731 
1732 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1733 
1734 	rcu_read_lock();
1735 
1736 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1737 		if (!is_shadow_present_pte(iter.old_spte) ||
1738 		    !is_last_spte(iter.old_spte, iter.level))
1739 			continue;
1740 
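		/*
		 * Clear both the hardware-writable bit and KVM's MMU-writable
		 * software bit; leaving MMU-writable set would let the fast
		 * page fault path restore write access locklessly, defeating
		 * the write protection.
		 */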
1741 		new_spte = iter.old_spte &
1742 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1743 
1744 		if (new_spte == iter.old_spte)
1745 			break;
1746 
1747 		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1748 		spte_set = true;
1749 	}
1750 
1751 	rcu_read_unlock();
1752 
1753 	return spte_set;
1754 }
1755 
1756 /*
1757  * Removes write access on the last level SPTE mapping this GFN and unsets the
1758  * MMU-writable bit to ensure future writes continue to be intercepted.
1759  * Returns true if an SPTE was set and a TLB flush is needed.
1760  */
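/*
 * Example usage (illustrative only, hypothetical caller): write-protecting a
 * single 4K mapping and flushing only if something changed:
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);
 *	write_unlock(&kvm->mmu_lock);
 */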
1761 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1762 				   struct kvm_memory_slot *slot, gfn_t gfn,
1763 				   int min_level)
1764 {
1765 	struct kvm_mmu_page *root;
1766 	bool spte_set = false;
1767 
1768 	lockdep_assert_held_write(&kvm->mmu_lock);
1769 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1770 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1771 
1772 	return spte_set;
1773 }
1774 
1775 /*
1776  * Return the level of the lowest level SPTE added to sptes.
1777  * That SPTE may be non-present.
1778  *
1779  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1780  */
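/*
 * Illustrative only (hypothetical caller): sptes is indexed by level, so a
 * caller walks from *root_level down to the returned leaf level:
 *
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	if (leaf != -1)
 *		for (level = root_level; level >= leaf; level--)
 *			... examine sptes[level], which may be non-present ...
 *
 * A return value of -1 means no SPTE was recorded for addr.
 */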
1781 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1782 			 int *root_level)
1783 {
1784 	struct tdp_iter iter;
1785 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1786 	gfn_t gfn = addr >> PAGE_SHIFT;
1787 	int leaf = -1;
1788 
1789 	*root_level = vcpu->arch.mmu->root_role.level;
1790 
1791 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1792 		leaf = iter.level;
1793 		sptes[leaf] = iter.old_spte;
1794 	}
1795 
1796 	return leaf;
1797 }
1798 
1799 /*
1800  * Returns the last level spte pointer of the shadow page walk for the given
1801  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1802  * walk could be performed, returns NULL and *spte does not contain valid data.
1803  *
1804  * Contract:
1805  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1806  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1807  *
1808  * WARNING: This function is only intended to be called during fast_page_fault.
1809  */
1810 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1811 					u64 *spte)
1812 {
1813 	struct tdp_iter iter;
1814 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1815 	gfn_t gfn = addr >> PAGE_SHIFT;
1816 	tdp_ptep_t sptep = NULL;
1817 
1818 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1819 		*spte = iter.old_spte;
1820 		sptep = iter.sptep;
1821 	}
1822 
1823 	/*
1824 	 * Perform the rcu_dereference to get the raw spte pointer value since
1825 	 * we are passing it up to fast_page_fault, which is shared with the
1826 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1827 	 * annotation.
1828 	 *
1829 	 * This is safe since fast_page_fault obeys the contracts of this
1830 	 * function as well as all TDP MMU contracts around modifying SPTEs
1831 	 * outside of mmu_lock.
1832 	 */
1833 	return rcu_dereference(sptep);
1834 }
1835