xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_mmu.c (revision 5efb685b)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include "mmu.h"
5 #include "mmu_internal.h"
6 #include "mmutrace.h"
7 #include "tdp_iter.h"
8 #include "tdp_mmu.h"
9 #include "spte.h"
10 
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
13 
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
16 {
17 	struct workqueue_struct *wq;
18 
19 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
20 	if (!wq)
21 		return -ENOMEM;
22 
23 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
24 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
25 	kvm->arch.tdp_mmu_zap_wq = wq;
26 	return 1;
27 }
28 
29 /* Arbitrarily returns true so that this may be used in if statements. */
30 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
31 							     bool shared)
32 {
33 	if (shared)
34 		lockdep_assert_held_read(&kvm->mmu_lock);
35 	else
36 		lockdep_assert_held_write(&kvm->mmu_lock);
37 
38 	return true;
39 }
40 
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42 {
43 	/* Also waits for any queued work items.  */
44 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
45 
46 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
47 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
48 
49 	/*
50 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
51 	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
52 	 * can call kvm_tdp_mmu_put_root and create new callbacks.
53 	 */
54 	rcu_barrier();
55 }
56 
57 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
58 {
59 	free_page((unsigned long)sp->spt);
60 	kmem_cache_free(mmu_page_header_cache, sp);
61 }
62 
63 /*
64  * This is called through call_rcu in order to free TDP page table memory
65  * safely with respect to other kernel threads that may be operating on
66  * the memory.
67  * Because TDP MMU page table memory is only accessed within RCU read-side
68  * critical sections and is freed only after a grace period has elapsed,
69  * lockless walkers are guaranteed not to use the memory after it is freed.
70  */
71 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
72 {
73 	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
74 					       rcu_head);
75 
76 	tdp_mmu_free_sp(sp);
77 }
78 
79 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
80 			     bool shared);
81 
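/*
 * Worker callback to zap an invalidated root.  Runs with mmu_lock held for
 * read so that vCPUs and other zappers can make progress, then drops the
 * reference that was gifted to the work item when it was queued.
 */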
82 static void tdp_mmu_zap_root_work(struct work_struct *work)
83 {
84 	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
85 						 tdp_mmu_async_work);
86 	struct kvm *kvm = root->tdp_mmu_async_data;
87 
88 	read_lock(&kvm->mmu_lock);
89 
90 	/*
91 	 * A TLB flush is not necessary as KVM performs a local TLB flush when
92 	 * allocating a new root (see kvm_mmu_load()), and when migrating a vCPU
93 	 * to a different pCPU.  Note, the local TLB flush on reuse also
94 	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
95 	 * intermediate paging structures, that may be zapped, as such entries
96 	 * are associated with the ASID on both VMX and SVM.
97 	 */
98 	tdp_mmu_zap_root(kvm, root, true);
99 
100 	/*
101 	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
102 	 * avoiding an infinite loop.  By design, the root is reachable while
103 	 * it's being asynchronously zapped, thus a different task can put its
104 	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
105 	 * asynchronously zapped root is unavoidable.
106 	 */
107 	kvm_tdp_mmu_put_root(kvm, root, true);
108 
109 	read_unlock(&kvm->mmu_lock);
110 }
111 
112 static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
113 {
114 	root->tdp_mmu_async_data = kvm;
115 	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
116 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
117 }
118 
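/*
 * Atomically mark the root as invalid.  Returns true if the root was already
 * invalid, false if this call was the one that invalidated it.
 */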
119 static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
120 {
121 	union kvm_mmu_page_role role = page->role;
122 	role.invalid = true;
123 
124 	/* No need to use cmpxchg, only the invalid bit can change.  */
125 	role.word = xchg(&page->role.word, role.word);
126 	return role.invalid;
127 }
128 
129 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
130 			  bool shared)
131 {
132 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
133 
134 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
135 		return;
136 
137 	WARN_ON(!is_tdp_mmu_page(root));
138 
139 	/*
140 	 * The root now has refcount=0.  It is valid, but readers already
141 	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
142 	 * rejects it.  This remains true for the rest of the execution
143 	 * of this function, because readers visit valid roots only
144 	 * (except for tdp_mmu_zap_root_work(), which however
145 	 * does not acquire any reference itself).
146 	 *
147 	 * Even though there are flows that need to visit all roots for
148 	 * correctness, they all take mmu_lock for write, so they cannot yet
149 	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
150 	 * since the root still has refcount=0.
151 	 *
152 	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
153 	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
154 	 * So the root temporarily gets an extra reference, going to refcount=1
155 	 * while staying invalid.  Readers still cannot acquire any reference;
156 	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
157 	 * they might take an extra reference if they themselves yield.
158 	 * Therefore, when the reference is given back by the worker,
159 	 * there is no guarantee that the refcount is still 1.  If not, whoever
160 	 * puts the last reference will free the page, but they will not have to
161 	 * zap the root because a root cannot go from invalid to valid.
162 	 */
163 	if (!kvm_tdp_root_mark_invalid(root)) {
164 		refcount_set(&root->tdp_mmu_root_count, 1);
165 
166 		/*
167 		 * Zapping the root in a worker is not just "nice to have";
168 		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
169 		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
170 		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
171 		 * might return with some roots not zapped yet.
172 		 */
173 		tdp_mmu_schedule_zap_root(kvm, root);
174 		return;
175 	}
176 
177 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
178 	list_del_rcu(&root->link);
179 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
180 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
181 }
182 
183 /*
184  * Returns the next root after @prev_root (or the first root if @prev_root is
185  * NULL).  A reference to the returned root is acquired, and the reference to
186  * @prev_root is released (the caller obviously must hold a reference to
187  * @prev_root if it's non-NULL).
188  *
189  * If @only_valid is true, invalid roots are skipped.
190  *
191  * Returns NULL if the end of tdp_mmu_roots was reached.
192  */
193 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
194 					      struct kvm_mmu_page *prev_root,
195 					      bool shared, bool only_valid)
196 {
197 	struct kvm_mmu_page *next_root;
198 
199 	rcu_read_lock();
200 
201 	if (prev_root)
202 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
203 						  &prev_root->link,
204 						  typeof(*prev_root), link);
205 	else
206 		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
207 						   typeof(*next_root), link);
208 
209 	while (next_root) {
210 		if ((!only_valid || !next_root->role.invalid) &&
211 		    kvm_tdp_mmu_get_root(next_root))
212 			break;
213 
214 		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
215 				&next_root->link, typeof(*next_root), link);
216 	}
217 
218 	rcu_read_unlock();
219 
220 	if (prev_root)
221 		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
222 
223 	return next_root;
224 }
225 
226 /*
227  * Note: this iterator gets and puts references to the roots it iterates over.
228  * This makes it safe to release the MMU lock and yield within the loop, but
229  * if exiting the loop early, the caller must drop the reference to the most
230  * recent root. (Unless keeping a live reference is desirable.)
231  *
232  * If shared is set, this function is operating under the MMU lock in read
233  * mode. In the unlikely event that this thread must free a root, the lock
234  * will be temporarily dropped and reacquired in write mode.
235  */
236 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
237 	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
238 	     _root;								\
239 	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
240 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
241 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
242 		} else
243 
244 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
245 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
246 
247 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
248 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
249 
250 /*
251  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
252  * the implication being that any flow that holds mmu_lock for read is
253  * inherently yield-friendly and should use the yield-safe variant above.
254  * Holding mmu_lock for write obviates the need for RCU protection as the list
255  * is guaranteed to be stable.
256  */
257 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
258 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
259 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
260 		    kvm_mmu_page_as_id(_root) != _as_id) {		\
261 		} else
262 
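/*
 * Allocate a shadow page and its backing page table from the vCPU's memory
 * caches.  The caches are topped up by the page fault path before mmu_lock is
 * acquired, so the allocations here do not sleep.
 */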
263 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
264 {
265 	struct kvm_mmu_page *sp;
266 
267 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
268 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
269 
270 	return sp;
271 }
272 
273 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
274 			    gfn_t gfn, union kvm_mmu_page_role role)
275 {
276 	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
277 
278 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
279 
280 	sp->role = role;
281 	sp->gfn = gfn;
282 	sp->ptep = sptep;
283 	sp->tdp_mmu_page = true;
284 
285 	trace_kvm_mmu_get_page(sp, true);
286 }
287 
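/*
 * Initialize a child shadow page for the SPTE the iterator is currently on,
 * deriving the child's role (one level down) from the parent shadow page.
 */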
288 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
289 				  struct tdp_iter *iter)
290 {
291 	struct kvm_mmu_page *parent_sp;
292 	union kvm_mmu_page_role role;
293 
294 	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
295 
296 	role = parent_sp->role;
297 	role.level--;
298 
299 	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
300 }
301 
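/*
 * Return the physical address of the TDP MMU root for the vCPU's current MMU
 * role, reusing an existing valid root with a matching role or allocating and
 * linking a new one.
 */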
302 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
303 {
304 	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
305 	struct kvm *kvm = vcpu->kvm;
306 	struct kvm_mmu_page *root;
307 
308 	lockdep_assert_held_write(&kvm->mmu_lock);
309 
310 	/*
311 	 * Check for an existing root before allocating a new one.  Note, the
312 	 * role check prevents consuming an invalid root.
313 	 */
314 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
315 		if (root->role.word == role.word &&
316 		    kvm_tdp_mmu_get_root(root))
317 			goto out;
318 	}
319 
320 	root = tdp_mmu_alloc_sp(vcpu);
321 	tdp_mmu_init_sp(root, NULL, 0, role);
322 
323 	refcount_set(&root->tdp_mmu_root_count, 1);
324 
325 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
326 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
327 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
328 
329 out:
330 	return __pa(root->spt);
331 }
332 
333 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
334 				u64 old_spte, u64 new_spte, int level,
335 				bool shared);
336 
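/*
 * Propagate accessed information to the primary MMU: if a present leaf SPTE
 * with the Accessed bit set is zapped, loses its Accessed bit, or changes
 * PFN, mark the old PFN as accessed.
 */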
337 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
338 {
339 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
340 		return;
341 
342 	if (is_accessed_spte(old_spte) &&
343 	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
344 	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
345 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
346 }
347 
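/*
 * Update dirty logging state: if a 4K SPTE becomes writable, or is writable
 * and its PFN changed, mark the gfn dirty in its memslot's dirty bitmap.
 */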
348 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
349 					  u64 old_spte, u64 new_spte, int level)
350 {
351 	bool pfn_changed;
352 	struct kvm_memory_slot *slot;
353 
354 	if (level > PG_LEVEL_4K)
355 		return;
356 
357 	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
358 
359 	if ((!is_writable_pte(old_spte) || pfn_changed) &&
360 	    is_writable_pte(new_spte)) {
361 		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
362 		mark_page_dirty_in_slot(kvm, slot, gfn);
363 	}
364 }
365 
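/* Account a newly linked TDP MMU page table page in kernel and KVM stats. */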
366 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
367 {
368 	kvm_account_pgtable_pages((void *)sp->spt, +1);
369 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
370 }
371 
372 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
373 {
374 	kvm_account_pgtable_pages((void *)sp->spt, -1);
375 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
376 }
377 
378 /**
379  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
380  *
381  * @kvm: kvm instance
382  * @sp: the page to be removed
383  * @shared: This operation may not be running under the exclusive use of
384  *	    the MMU lock and the operation must synchronize with other
385  *	    threads that might be adding or removing pages.
386  */
387 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
388 			      bool shared)
389 {
390 	tdp_unaccount_mmu_page(kvm, sp);
391 
392 	if (!sp->nx_huge_page_disallowed)
393 		return;
394 
395 	if (shared)
396 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
397 	else
398 		lockdep_assert_held_write(&kvm->mmu_lock);
399 
400 	sp->nx_huge_page_disallowed = false;
401 	untrack_possible_nx_huge_page(kvm, sp);
402 
403 	if (shared)
404 		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
405 }
406 
407 /**
408  * handle_removed_pt() - handle a page table removed from the TDP structure
409  *
410  * @kvm: kvm instance
411  * @pt: the page removed from the paging structure
412  * @shared: This operation may not be running under the exclusive use
413  *	    of the MMU lock and the operation must synchronize with other
414  *	    threads that might be modifying SPTEs.
415  *
416  * Given a page table that has been removed from the TDP paging structure,
417  * iterates through the page table to clear SPTEs and free child page tables.
418  *
419  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
420  * protection. Since this thread removed it from the paging structure,
421  * this thread will be responsible for ensuring the page is freed. Hence the
422  * early rcu_dereferences in the function.
423  */
424 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
425 {
426 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
427 	int level = sp->role.level;
428 	gfn_t base_gfn = sp->gfn;
429 	int i;
430 
431 	trace_kvm_mmu_prepare_zap_page(sp);
432 
433 	tdp_mmu_unlink_sp(kvm, sp, shared);
434 
435 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
436 		tdp_ptep_t sptep = pt + i;
437 		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
438 		u64 old_spte;
439 
440 		if (shared) {
441 			/*
442 			 * Set the SPTE to a nonpresent value that other
443 			 * threads will not overwrite. If the SPTE was
444 			 * already marked as removed then another thread
445 			 * handling a page fault could overwrite it, so
446 			 * keep retrying until the SPTE is successfully set
447 			 * from some other value to the removed SPTE value.
448 			 */
449 			for (;;) {
450 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
451 				if (!is_removed_spte(old_spte))
452 					break;
453 				cpu_relax();
454 			}
455 		} else {
456 			/*
457 			 * If the SPTE is not MMU-present, there is no backing
458 			 * page associated with the SPTE and so no side effects
459 			 * that need to be recorded, and exclusive ownership of
460 			 * mmu_lock ensures the SPTE can't be made present.
461 			 * Note, zapping MMIO SPTEs is also unnecessary as they
462 			 * are guarded by the memslots generation, not by being
463 			 * unreachable.
464 			 */
465 			old_spte = kvm_tdp_mmu_read_spte(sptep);
466 			if (!is_shadow_present_pte(old_spte))
467 				continue;
468 
469 			/*
470 			 * Use the common helper instead of a raw WRITE_ONCE as
471 			 * the SPTE needs to be updated atomically if it can be
472 			 * modified by a different vCPU outside of mmu_lock.
473 			 * Even though the parent SPTE is !PRESENT, the TLB
474 			 * hasn't yet been flushed, and both Intel and AMD
475 			 * document that A/D assists can use upper-level PxE
476 			 * entries that are cached in the TLB, i.e. the CPU can
477 			 * still access the page and mark it dirty.
478 			 *
479 			 * No retry is needed in the atomic update path as the
480 			 * sole concern is dropping a Dirty bit, i.e. no other
481 			 * task can zap/remove the SPTE as mmu_lock is held for
482 			 * write.  Marking the SPTE as a removed SPTE is not
483 			 * strictly necessary for the same reason, but using
484 			 * the removed SPTE value keeps the shared/exclusive
485 			 * paths consistent and allows the handle_changed_spte()
486 			 * call below to hardcode the new value to REMOVED_SPTE.
487 			 *
488 			 * Note, even though dropping a Dirty bit is the only
489 			 * scenario where a non-atomic update could result in a
490 			 * functional bug, simply checking the Dirty bit isn't
491 			 * sufficient as a fast page fault could read the upper
492 			 * level SPTE before it is zapped, and then make this
493 			 * target SPTE writable, resume the guest, and set the
494 			 * Dirty bit between reading the SPTE above and writing
495 			 * it here.
496 			 */
497 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
498 							  REMOVED_SPTE, level);
499 		}
500 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
501 				    old_spte, REMOVED_SPTE, level, shared);
502 	}
503 
504 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
505 }
506 
507 /**
508  * __handle_changed_spte - handle bookkeeping associated with an SPTE change
509  * @kvm: kvm instance
510  * @as_id: the address space of the paging structure the SPTE was a part of
511  * @gfn: the base GFN that was mapped by the SPTE
512  * @old_spte: The value of the SPTE before the change
513  * @new_spte: The value of the SPTE after the change
514  * @level: the level of the PT the SPTE is part of in the paging structure
515  * @shared: This operation may not be running under the exclusive use of
516  *	    the MMU lock and the operation must synchronize with other
517  *	    threads that might be modifying SPTEs.
518  *
519  * Handle bookkeeping that might result from the modification of a SPTE.
520  * This function must be called for all TDP SPTE modifications.
521  */
522 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
523 				  u64 old_spte, u64 new_spte, int level,
524 				  bool shared)
525 {
526 	bool was_present = is_shadow_present_pte(old_spte);
527 	bool is_present = is_shadow_present_pte(new_spte);
528 	bool was_leaf = was_present && is_last_spte(old_spte, level);
529 	bool is_leaf = is_present && is_last_spte(new_spte, level);
530 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
531 
532 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
533 	WARN_ON(level < PG_LEVEL_4K);
534 	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
535 
536 	/*
537 	 * If this warning were to trigger it would indicate that there was a
538 	 * missing MMU notifier or a race with some notifier handler.
539 	 * A present, leaf SPTE should never be directly replaced with another
540 	 * present leaf SPTE pointing to a different PFN. A notifier handler
541 	 * should be zapping the SPTE before the main MM's page table is
542 	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
543 	 * thread before replacement.
544 	 */
545 	if (was_leaf && is_leaf && pfn_changed) {
546 		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
547 		       "SPTE with another present leaf SPTE mapping a\n"
548 		       "different PFN!\n"
549 		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
550 		       as_id, gfn, old_spte, new_spte, level);
551 
552 		/*
553 		 * Crash the host to prevent error propagation and guest data
554 		 * corruption.
555 		 */
556 		BUG();
557 	}
558 
559 	if (old_spte == new_spte)
560 		return;
561 
562 	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
563 
564 	if (is_leaf)
565 		check_spte_writable_invariants(new_spte);
566 
567 	/*
568 	 * The only time a SPTE should be changed from one non-present state to
569 	 * another is when an MMIO entry is installed/modified/
570 	 * removed. In that case, there is nothing to do here.
571 	 */
572 	if (!was_present && !is_present) {
573 		/*
574 		 * If this change does not involve an MMIO SPTE or removed SPTE,
575 		 * it is unexpected. Log the change, though it should not
576 		 * impact the guest since both the former and current SPTEs
577 		 * are nonpresent.
578 		 */
579 		if (WARN_ON(!is_mmio_spte(old_spte) &&
580 			    !is_mmio_spte(new_spte) &&
581 			    !is_removed_spte(new_spte)))
582 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
583 			       "should not be replaced with another,\n"
584 			       "different nonpresent SPTE, unless one or both\n"
585 			       "are MMIO SPTEs, or the new SPTE is\n"
586 			       "a temporary removed SPTE.\n"
587 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
588 			       as_id, gfn, old_spte, new_spte, level);
589 		return;
590 	}
591 
592 	if (is_leaf != was_leaf)
593 		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
594 
595 	if (was_leaf && is_dirty_spte(old_spte) &&
596 	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
597 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
598 
599 	/*
600 	 * Recursively handle child PTs if the change removed a subtree from
601 	 * the paging structure.  Note the WARN on the PFN changing without the
602 	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
603 	 * pages are kernel allocations and should never be migrated.
604 	 */
605 	if (was_present && !was_leaf &&
606 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
607 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
608 }
609 
610 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
611 				u64 old_spte, u64 new_spte, int level,
612 				bool shared)
613 {
614 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
615 			      shared);
616 	handle_changed_spte_acc_track(old_spte, new_spte, level);
617 	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
618 				      new_spte, level);
619 }
620 
621 /*
622  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
623  * and handle the associated bookkeeping.  Do not mark the page dirty
624  * in KVM's dirty bitmaps.
625  *
626  * If setting the SPTE fails because it has changed, iter->old_spte will be
627  * refreshed to the current value of the spte.
628  *
629  * @kvm: kvm instance
630  * @iter: a tdp_iter instance currently on the SPTE that should be set
631  * @new_spte: The value the SPTE should be set to
632  * Return:
633  * * 0      - If the SPTE was set.
634  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
635  *            no side-effects other than setting iter->old_spte to the last
636  *            known value of the spte.
637  */
638 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
639 					  struct tdp_iter *iter,
640 					  u64 new_spte)
641 {
642 	u64 *sptep = rcu_dereference(iter->sptep);
643 
644 	/*
645 	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
646 	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
647 	 * and pre-checking before inserting a new SPTE is advantageous as it
648 	 * avoids unnecessary work.
649 	 */
650 	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
651 
652 	lockdep_assert_held_read(&kvm->mmu_lock);
653 
654 	/*
655 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
656 	 * does not hold the mmu_lock.
657 	 */
658 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
659 		return -EBUSY;
660 
661 	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
662 			      new_spte, iter->level, true);
663 	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
664 
665 	return 0;
666 }
667 
668 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
669 					  struct tdp_iter *iter)
670 {
671 	int ret;
672 
673 	/*
674 	 * Freeze the SPTE by setting it to a special,
675 	 * non-present value. This will stop other threads from
676 	 * immediately installing a present entry in its place
677 	 * before the TLBs are flushed.
678 	 */
679 	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
680 	if (ret)
681 		return ret;
682 
683 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
684 
685 	/*
686 	 * No other thread can overwrite the removed SPTE as they must either
687 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
688 	 * overwrite the special removed SPTE value. No bookkeeping is needed
689 	 * here since the SPTE is going from non-present to non-present.  Use
690 	 * the raw write helper to avoid an unnecessary check on volatile bits.
691 	 */
692 	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
693 
694 	return 0;
695 }
696 
697 
698 /*
699  * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
700  * @kvm:	      KVM instance
701  * @as_id:	      Address space ID, i.e. regular vs. SMM
702  * @sptep:	      Pointer to the SPTE
703  * @old_spte:	      The current value of the SPTE
704  * @new_spte:	      The new value that will be set for the SPTE
705  * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
706  * @level:	      The level _containing_ the SPTE (its parent PT's level)
707  * @record_acc_track: Notify the MM subsystem of changes to the accessed state
708  *		      of the page. Should be set unless handling an MMU
709  *		      notifier for access tracking. Leaving record_acc_track
710  *		      unset in that case prevents page accesses from being
711  *		      double counted.
712  * @record_dirty_log: Record the page as dirty in the dirty bitmap if
713  *		      appropriate for the change being made. Should be set
714  *		      unless performing certain dirty logging operations.
715  *		      Leaving record_dirty_log unset in that case prevents page
716  *		      writes from being double counted.
717  *
718  * Returns the old SPTE value, which _may_ be different from @old_spte if the
719  * SPTE had volatile bits.
720  */
721 static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
722 			      u64 old_spte, u64 new_spte, gfn_t gfn, int level,
723 			      bool record_acc_track, bool record_dirty_log)
724 {
725 	lockdep_assert_held_write(&kvm->mmu_lock);
726 
727 	/*
728 	 * No thread should be using this function to set SPTEs to or from the
729 	 * temporary removed SPTE value.
730 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
731 	 * should be used. If operating under the MMU lock in write mode, the
732 	 * use of the removed SPTE should not be necessary.
733 	 */
734 	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
735 
736 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
737 
738 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
739 
740 	if (record_acc_track)
741 		handle_changed_spte_acc_track(old_spte, new_spte, level);
742 	if (record_dirty_log)
743 		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
744 					      new_spte, level);
745 	return old_spte;
746 }
747 
748 static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
749 				     u64 new_spte, bool record_acc_track,
750 				     bool record_dirty_log)
751 {
752 	WARN_ON_ONCE(iter->yielded);
753 
754 	iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
755 					    iter->old_spte, new_spte,
756 					    iter->gfn, iter->level,
757 					    record_acc_track, record_dirty_log);
758 }
759 
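/*
 * Wrappers around _tdp_mmu_set_spte() that choose whether accessed and/or
 * dirty information is recorded for the change.
 */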
760 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
761 				    u64 new_spte)
762 {
763 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
764 }
765 
766 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
767 						 struct tdp_iter *iter,
768 						 u64 new_spte)
769 {
770 	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
771 }
772 
773 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
774 						 struct tdp_iter *iter,
775 						 u64 new_spte)
776 {
777 	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
778 }
779 
780 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
781 	for_each_tdp_pte(_iter, _root, _start, _end)
782 
783 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
784 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
785 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
786 		    !is_last_spte(_iter.old_spte, _iter.level))		\
787 			continue;					\
788 		else
789 
790 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
791 	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
792 
793 /*
794  * Yield if the MMU lock is contended or this thread needs to return control
795  * to the scheduler.
796  *
797  * If this function should yield and flush is set, it will perform a remote
798  * TLB flush before yielding.
799  *
800  * If this function yields, iter->yielded is set and the caller must skip to
801  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
802  * over the paging structures to allow the iterator to continue its traversal
803  * from the paging structure root.
804  *
805  * Returns true if this function yielded.
806  */
807 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
808 							  struct tdp_iter *iter,
809 							  bool flush, bool shared)
810 {
811 	WARN_ON(iter->yielded);
812 
813 	/* Ensure forward progress has been made before yielding. */
814 	if (iter->next_last_level_gfn == iter->yielded_gfn)
815 		return false;
816 
817 	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
818 		if (flush)
819 			kvm_flush_remote_tlbs(kvm);
820 
821 		rcu_read_unlock();
822 
823 		if (shared)
824 			cond_resched_rwlock_read(&kvm->mmu_lock);
825 		else
826 			cond_resched_rwlock_write(&kvm->mmu_lock);
827 
828 		rcu_read_lock();
829 
830 		WARN_ON(iter->gfn > iter->next_last_level_gfn);
831 
832 		iter->yielded = true;
833 	}
834 
835 	return iter->yielded;
836 }
837 
838 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
839 {
840 	/*
841 	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
842 	 * a gpa range that would exceed the max gfn, and KVM does not create
843 	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
844 	 * the slow emulation path every time.
845 	 */
846 	return kvm_mmu_max_gfn() + 1;
847 }
848 
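/*
 * Zap all present SPTEs at exactly @zap_level in @root, yielding as needed.
 * Called twice by tdp_mmu_zap_root() so that the zap is done in two passes
 * and doesn't trigger RCU stalls.
 */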
849 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
850 			       bool shared, int zap_level)
851 {
852 	struct tdp_iter iter;
853 
854 	gfn_t end = tdp_mmu_max_gfn_exclusive();
855 	gfn_t start = 0;
856 
857 	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
858 retry:
859 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
860 			continue;
861 
862 		if (!is_shadow_present_pte(iter.old_spte))
863 			continue;
864 
865 		if (iter.level > zap_level)
866 			continue;
867 
868 		if (!shared)
869 			tdp_mmu_set_spte(kvm, &iter, 0);
870 		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
871 			goto retry;
872 	}
873 }
874 
875 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
876 			     bool shared)
877 {
878 
879 	/*
880 	 * The root must have an elevated refcount so that it's reachable via
881 	 * mmu_notifier callbacks, which allows this path to yield and drop
882 	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
883 	 * must drop all references to relevant pages prior to completing the
884 	 * callback.  Dropping mmu_lock with an unreachable root would result
885 	 * in zapping SPTEs after a relevant mmu_notifier callback completes
886 	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
887 	 * dirty accessed bits to the SPTE's associated struct page.
888 	 */
889 	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
890 
891 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
892 
893 	rcu_read_lock();
894 
895 	/*
896 	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
897 	 * split the zap into two passes.  On the first pass, zap at the 1gb
898 	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
899 	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
900 	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
901 	 *
902 	 * Because zapping a SP recurses on its children, stepping down to
903 	 * PG_LEVEL_4K in the iterator itself is unnecessary.
904 	 */
905 	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
906 	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
907 
908 	rcu_read_unlock();
909 }
910 
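/*
 * Zap a single shadow page by clearing the parent SPTE that points at it.
 * Returns false (with a WARN) if the page cannot be zapped this way, e.g. if
 * it is a root or its parent SPTE has already been zapped.
 */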
911 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
912 {
913 	u64 old_spte;
914 
915 	/*
916 	 * This helper intentionally doesn't allow zapping a root shadow page,
917 	 * which doesn't have a parent page table and thus no associated entry.
918 	 */
919 	if (WARN_ON_ONCE(!sp->ptep))
920 		return false;
921 
922 	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
923 	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
924 		return false;
925 
926 	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
927 			   sp->gfn, sp->role.level + 1, true, true);
928 
929 	return true;
930 }
931 
932 /*
933  * If can_yield is true, will release the MMU lock and reschedule if the
934  * scheduler needs the CPU or there is contention on the MMU lock. If this
935  * function cannot yield, it will not release the MMU lock or reschedule and
936  * the caller must ensure it does not supply too large a GFN range, or the
937  * operation can cause a soft lockup.
938  */
939 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
940 			      gfn_t start, gfn_t end, bool can_yield, bool flush)
941 {
942 	struct tdp_iter iter;
943 
944 	end = min(end, tdp_mmu_max_gfn_exclusive());
945 
946 	lockdep_assert_held_write(&kvm->mmu_lock);
947 
948 	rcu_read_lock();
949 
950 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
951 		if (can_yield &&
952 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
953 			flush = false;
954 			continue;
955 		}
956 
957 		if (!is_shadow_present_pte(iter.old_spte) ||
958 		    !is_last_spte(iter.old_spte, iter.level))
959 			continue;
960 
961 		tdp_mmu_set_spte(kvm, &iter, 0);
962 		flush = true;
963 	}
964 
965 	rcu_read_unlock();
966 
967 	/*
968 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
969 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
970 	 */
971 	return flush;
972 }
973 
974 /*
975  * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
976  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
977  * more SPTEs were zapped since the MMU lock was last acquired.
978  */
979 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
980 			   bool can_yield, bool flush)
981 {
982 	struct kvm_mmu_page *root;
983 
984 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
985 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
986 
987 	return flush;
988 }
989 
990 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
991 {
992 	struct kvm_mmu_page *root;
993 	int i;
994 
995 	/*
996 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
997 	 * before returning to the caller.  Zap directly even if the root is
998 	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
999 	 * all that expensive and mmu_lock is already held, which means the
1000 	 * worker has yielded, i.e. flushing the work instead of zapping here
1001 	 * isn't guaranteed to be any faster.
1002 	 *
1003 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
1004 	 * is being destroyed or the userspace VMM has exited.  In both cases,
1005 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
1006 	 */
1007 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1008 		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
1009 			tdp_mmu_zap_root(kvm, root, false);
1010 	}
1011 }
1012 
1013 /*
1014  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
1015  * zap" completes.
1016  */
1017 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
1018 {
1019 	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
1020 }
1021 
1022 /*
1023  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
1024  * is about to be zapped, e.g. in response to a memslots update.  The actual
1025  * zapping is performed asynchronously, so a reference is taken on all roots.
1026  * Using a separate workqueue makes it easy to ensure that the destruction is
1027  * performed before the "fast zap" completes, without keeping a separate list
1028  * of invalidated roots; the list is effectively the list of work items in
1029  * the workqueue.
1030  *
1031  * Roots that are already invalid are skipped, as they were queued for zapping
1032  * when first invalidated.  Take a reference on each valid root, as the worker
1033  * assumes it was gifted a reference to the root it processes.  Because mmu_lock
1034  * is held for write, the list of roots cannot be stale (no zero refcounts).
1035  *
1036  * This has essentially the same effect for the TDP MMU
1037  * as updating mmu_valid_gen does for the shadow MMU.
1038  */
1039 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
1040 {
1041 	struct kvm_mmu_page *root;
1042 
1043 	lockdep_assert_held_write(&kvm->mmu_lock);
1044 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1045 		if (!root->role.invalid &&
1046 		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
1047 			root->role.invalid = true;
1048 			tdp_mmu_schedule_zap_root(kvm, root);
1049 		}
1050 	}
1051 }
1052 
1053 /*
1054  * Installs a last-level SPTE to handle a TDP page fault.
1055  * (NPT/EPT violation/misconfiguration)
1056  */
1057 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1058 					  struct kvm_page_fault *fault,
1059 					  struct tdp_iter *iter)
1060 {
1061 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1062 	u64 new_spte;
1063 	int ret = RET_PF_FIXED;
1064 	bool wrprot = false;
1065 
1066 	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
1067 		return RET_PF_RETRY;
1068 
1069 	if (unlikely(!fault->slot))
1070 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
1071 	else
1072 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1073 					 fault->pfn, iter->old_spte, fault->prefetch, true,
1074 					 fault->map_writable, &new_spte);
1075 
1076 	if (new_spte == iter->old_spte)
1077 		ret = RET_PF_SPURIOUS;
1078 	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
1079 		return RET_PF_RETRY;
1080 	else if (is_shadow_present_pte(iter->old_spte) &&
1081 		 !is_last_spte(iter->old_spte, iter->level))
1082 		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1083 
1084 	/*
1085 	 * If the page fault was caused by a write but the page is write
1086 	 * protected, emulation is needed. If the emulation was skipped,
1087 	 * the vCPU would have the same fault again.
1088 	 */
1089 	if (wrprot) {
1090 		if (fault->write)
1091 			ret = RET_PF_EMULATE;
1092 	}
1093 
1094 	/* If an MMIO SPTE is installed, the MMIO will need to be emulated. */
1095 	if (unlikely(is_mmio_spte(new_spte))) {
1096 		vcpu->stat.pf_mmio_spte_created++;
1097 		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1098 				     new_spte);
1099 		ret = RET_PF_EMULATE;
1100 	} else {
1101 		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1102 				       rcu_dereference(iter->sptep));
1103 	}
1104 
1105 	return ret;
1106 }
1107 
1108 /*
1109  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1110  * provided page table.
1111  *
1112  * @kvm: kvm instance
1113  * @iter: a tdp_iter instance currently on the SPTE that should be set
1114  * @sp: The new TDP page table to install.
1115  * @shared: This operation is running under the MMU lock in read mode.
1116  *
1117  * Returns: 0 if the new page table was installed. Non-0 if the page table
1118  *          could not be installed (e.g. the atomic compare-exchange failed).
1119  */
1120 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1121 			   struct kvm_mmu_page *sp, bool shared)
1122 {
1123 	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1124 	int ret = 0;
1125 
1126 	if (shared) {
1127 		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1128 		if (ret)
1129 			return ret;
1130 	} else {
1131 		tdp_mmu_set_spte(kvm, iter, spte);
1132 	}
1133 
1134 	tdp_account_mmu_page(kvm, sp);
1135 
1136 	return 0;
1137 }
1138 
1139 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1140 				   struct kvm_mmu_page *sp, bool shared);
1141 
1142 /*
1143  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1144  * page tables and SPTEs to translate the faulting guest physical address.
1145  */
1146 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1147 {
1148 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1149 	struct kvm *kvm = vcpu->kvm;
1150 	struct tdp_iter iter;
1151 	struct kvm_mmu_page *sp;
1152 	int ret = RET_PF_RETRY;
1153 
1154 	kvm_mmu_hugepage_adjust(vcpu, fault);
1155 
1156 	trace_kvm_mmu_spte_requested(fault);
1157 
1158 	rcu_read_lock();
1159 
1160 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1161 		int r;
1162 
1163 		if (fault->nx_huge_page_workaround_enabled)
1164 			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1165 
1166 		/*
1167 		 * If SPTE has been frozen by another thread, just give up and
1168 		 * retry, avoiding unnecessary page table allocation and free.
1169 		 */
1170 		if (is_removed_spte(iter.old_spte))
1171 			goto retry;
1172 
1173 		if (iter.level == fault->goal_level)
1174 			goto map_target_level;
1175 
1176 		/* Step down into the lower level page table if it exists. */
1177 		if (is_shadow_present_pte(iter.old_spte) &&
1178 		    !is_large_pte(iter.old_spte))
1179 			continue;
1180 
1181 		/*
1182 		 * The SPTE is either non-present or points to a huge page that
1183 		 * needs to be split.
1184 		 */
1185 		sp = tdp_mmu_alloc_sp(vcpu);
1186 		tdp_mmu_init_child_sp(sp, &iter);
1187 
1188 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1189 
1190 		if (is_shadow_present_pte(iter.old_spte))
1191 			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1192 		else
1193 			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1194 
1195 		/*
1196 		 * Force the guest to retry if installing an upper level SPTE
1197 		 * failed, e.g. because a different task modified the SPTE.
1198 		 */
1199 		if (r) {
1200 			tdp_mmu_free_sp(sp);
1201 			goto retry;
1202 		}
1203 
1204 		if (fault->huge_page_disallowed &&
1205 		    fault->req_level >= iter.level) {
1206 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1207 			if (sp->nx_huge_page_disallowed)
1208 				track_possible_nx_huge_page(kvm, sp);
1209 			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1210 		}
1211 	}
1212 
1213 	/*
1214 	 * The walk aborted before reaching the target level, e.g. because the
1215 	 * iterator detected an upper level SPTE was frozen during traversal.
1216 	 */
1217 	WARN_ON_ONCE(iter.level == fault->goal_level);
1218 	goto retry;
1219 
1220 map_target_level:
1221 	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1222 
1223 retry:
1224 	rcu_read_unlock();
1225 	return ret;
1226 }
1227 
1228 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1229 				 bool flush)
1230 {
1231 	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
1232 				     range->end, range->may_block, flush);
1233 }
1234 
1235 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1236 			      struct kvm_gfn_range *range);
1237 
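/*
 * Invoke @handler on each present leaf SPTE covering @range, for every root in
 * the range's address space.  Returns true if any invocation returned true.
 */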
1238 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1239 						   struct kvm_gfn_range *range,
1240 						   tdp_handler_t handler)
1241 {
1242 	struct kvm_mmu_page *root;
1243 	struct tdp_iter iter;
1244 	bool ret = false;
1245 
1246 	/*
1247 	 * Don't support rescheduling, none of the MMU notifiers that funnel
1248 	 * into this helper allow blocking; it'd be dead, wasteful code.
1249 	 */
1250 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1251 		rcu_read_lock();
1252 
1253 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1254 			ret |= handler(kvm, &iter, range);
1255 
1256 		rcu_read_unlock();
1257 	}
1258 
1259 	return ret;
1260 }
1261 
1262 /*
1263  * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return true
1264  * if any of the GFNs in the range have been accessed.
1265  */
1266 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1267 			  struct kvm_gfn_range *range)
1268 {
1269 	u64 new_spte = 0;
1270 
1271 	/* If we have a non-accessed entry we don't need to change the pte. */
1272 	if (!is_accessed_spte(iter->old_spte))
1273 		return false;
1274 
1275 	new_spte = iter->old_spte;
1276 
1277 	if (spte_ad_enabled(new_spte)) {
1278 		new_spte &= ~shadow_accessed_mask;
1279 	} else {
1280 		/*
1281 		 * Capture the dirty status of the page, so that it doesn't get
1282 		 * lost when the SPTE is marked for access tracking.
1283 		 */
1284 		if (is_writable_pte(new_spte))
1285 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1286 
1287 		new_spte = mark_spte_for_access_track(new_spte);
1288 	}
1289 
1290 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
1291 
1292 	return true;
1293 }
1294 
1295 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1296 {
1297 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1298 }
1299 
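/* Report whether the SPTE has been accessed, without modifying it. */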
1300 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1301 			 struct kvm_gfn_range *range)
1302 {
1303 	return is_accessed_spte(iter->old_spte);
1304 }
1305 
1306 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1307 {
1308 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1309 }
1310 
1311 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1312 			 struct kvm_gfn_range *range)
1313 {
1314 	u64 new_spte;
1315 
1316 	/* Huge pages aren't expected to be modified without first being zapped. */
1317 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
1318 
1319 	if (iter->level != PG_LEVEL_4K ||
1320 	    !is_shadow_present_pte(iter->old_spte))
1321 		return false;
1322 
1323 	/*
1324 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
1325 	 * zero the SPTE before setting the new PFN, but doing so preserves the
1326 	 * invariant that the PFN of a present leaf SPTE can never change.
1327 	 * See __handle_changed_spte().
1328 	 */
1329 	tdp_mmu_set_spte(kvm, iter, 0);
1330 
1331 	if (!pte_write(range->pte)) {
1332 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1333 								  pte_pfn(range->pte));
1334 
1335 		tdp_mmu_set_spte(kvm, iter, new_spte);
1336 	}
1337 
1338 	return true;
1339 }
1340 
1341 /*
1342  * Handle the changed_pte MMU notifier for the TDP MMU.
1343  * range->pte holds the new pte_t mapping the HVA specified by the MMU
1344  * notifier.
1345  * Returns non-zero if a flush is needed before releasing the MMU lock.
1346  */
1347 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1348 {
1349 	/*
1350 	 * No need to handle the remote TLB flush under RCU protection, the
1351 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1352 	 * shadow page.  See the WARN on pfn_changed in __handle_changed_spte().
1353 	 */
1354 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1355 }
1356 
1357 /*
1358  * Remove write access from all SPTEs at or above min_level that map GFNs
1359  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1360  * be flushed.
1361  */
1362 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1363 			     gfn_t start, gfn_t end, int min_level)
1364 {
1365 	struct tdp_iter iter;
1366 	u64 new_spte;
1367 	bool spte_set = false;
1368 
1369 	rcu_read_lock();
1370 
1371 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1372 
1373 	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1374 retry:
1375 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1376 			continue;
1377 
1378 		if (!is_shadow_present_pte(iter.old_spte) ||
1379 		    !is_last_spte(iter.old_spte, iter.level) ||
1380 		    !(iter.old_spte & PT_WRITABLE_MASK))
1381 			continue;
1382 
1383 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1384 
1385 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1386 			goto retry;
1387 
1388 		spte_set = true;
1389 	}
1390 
1391 	rcu_read_unlock();
1392 	return spte_set;
1393 }
1394 
1395 /*
1396  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1397  * only affect leaf SPTEs down to min_level.
1398  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1399  */
1400 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1401 			     const struct kvm_memory_slot *slot, int min_level)
1402 {
1403 	struct kvm_mmu_page *root;
1404 	bool spte_set = false;
1405 
1406 	lockdep_assert_held_read(&kvm->mmu_lock);
1407 
1408 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1409 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1410 			     slot->base_gfn + slot->npages, min_level);
1411 
1412 	return spte_set;
1413 }
1414 
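/*
 * Allocate a shadow page for huge page splitting, outside the vCPU fault path
 * and thus without the per-vCPU caches; @gfp dictates whether the allocation
 * may block on reclaim.
 */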
1415 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1416 {
1417 	struct kvm_mmu_page *sp;
1418 
1419 	gfp |= __GFP_ZERO;
1420 
1421 	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1422 	if (!sp)
1423 		return NULL;
1424 
1425 	sp->spt = (void *)__get_free_page(gfp);
1426 	if (!sp->spt) {
1427 		kmem_cache_free(mmu_page_header_cache, sp);
1428 		return NULL;
1429 	}
1430 
1431 	return sp;
1432 }
1433 
1434 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1435 						       struct tdp_iter *iter,
1436 						       bool shared)
1437 {
1438 	struct kvm_mmu_page *sp;
1439 
1440 	/*
1441 	 * Since we are allocating while under the MMU lock we have to be
1442 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1443 	 * reclaim and to avoid making any filesystem callbacks (which can end
1444 	 * up invoking KVM MMU notifiers, resulting in a deadlock).
1445 	 *
1446 	 * If this allocation fails we drop the lock and retry with reclaim
1447 	 * allowed.
1448 	 */
1449 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1450 	if (sp)
1451 		return sp;
1452 
1453 	rcu_read_unlock();
1454 
1455 	if (shared)
1456 		read_unlock(&kvm->mmu_lock);
1457 	else
1458 		write_unlock(&kvm->mmu_lock);
1459 
1460 	iter->yielded = true;
1461 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1462 
1463 	if (shared)
1464 		read_lock(&kvm->mmu_lock);
1465 	else
1466 		write_lock(&kvm->mmu_lock);
1467 
1468 	rcu_read_lock();
1469 
1470 	return sp;
1471 }
1472 
1473 /* Note, the caller is responsible for initializing @sp. */
1474 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1475 				   struct kvm_mmu_page *sp, bool shared)
1476 {
1477 	const u64 huge_spte = iter->old_spte;
1478 	const int level = iter->level;
1479 	int ret, i;
1480 
1481 	/*
1482 	 * No need for atomics when writing to sp->spt since the page table has
1483 	 * not been linked in yet and thus is not reachable from any other CPU.
1484 	 */
1485 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1486 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1487 
1488 	/*
1489 	 * Replace the huge spte with a pointer to the populated lower level
1490 	 * page table. Since we are making this change without a TLB flush vCPUs
1491 	 * will see a mix of the split mappings and the original huge mapping,
1492 	 * depending on what's currently in their TLB. This is fine from a
1493 	 * correctness standpoint since the translation will be the same either
1494 	 * way.
1495 	 */
1496 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1497 	if (ret)
1498 		goto out;
1499 
1500 	/*
1501 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1502 	 * are overwriting from the page stats. But we have to manually update
1503 	 * the page stats with the new present child pages.
1504 	 */
1505 	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1506 
1507 out:
1508 	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1509 	return ret;
1510 }
1511 
1512 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1513 					 struct kvm_mmu_page *root,
1514 					 gfn_t start, gfn_t end,
1515 					 int target_level, bool shared)
1516 {
1517 	struct kvm_mmu_page *sp = NULL;
1518 	struct tdp_iter iter;
1519 	int ret = 0;
1520 
1521 	rcu_read_lock();
1522 
1523 	/*
1524 	 * Traverse the page table splitting all huge pages above the target
1525 	 * level into one lower level. For example, if we encounter a 1GB page
1526 	 * we split it into 512 2MB pages.
1527 	 *
1528 	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1529 	 * to visit an SPTE before ever visiting its children, which means we
1530 	 * will correctly recursively split huge pages that are more than one
1531 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1532 	 * and then splitting each of those to 512 4KB pages).
1533 	 */
1534 	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1535 retry:
1536 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1537 			continue;
1538 
1539 		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1540 			continue;
1541 
1542 		if (!sp) {
1543 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1544 			if (!sp) {
1545 				ret = -ENOMEM;
1546 				trace_kvm_mmu_split_huge_page(iter.gfn,
1547 							      iter.old_spte,
1548 							      iter.level, ret);
1549 				break;
1550 			}
1551 
1552 			if (iter.yielded)
1553 				continue;
1554 		}
1555 
1556 		tdp_mmu_init_child_sp(sp, &iter);
1557 
1558 		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1559 			goto retry;
1560 
1561 		sp = NULL;
1562 	}
1563 
1564 	rcu_read_unlock();
1565 
1566 	/*
1567 	 * It's possible to exit the loop having never used the last sp if, for
1568 	 * example, a vCPU doing HugePage NX splitting wins the race and
1569 	 * installs its own sp in place of the last sp we tried to split.
1570 	 */
1571 	if (sp)
1572 		tdp_mmu_free_sp(sp);
1573 
1574 	return ret;
1575 }
1576 
1577 
1578 /*
1579  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1580  */
1581 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1582 				      const struct kvm_memory_slot *slot,
1583 				      gfn_t start, gfn_t end,
1584 				      int target_level, bool shared)
1585 {
1586 	struct kvm_mmu_page *root;
1587 	int r = 0;
1588 
1589 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1590 
1591 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1592 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1593 		if (r) {
1594 			kvm_tdp_mmu_put_root(kvm, root, shared);
1595 			break;
1596 		}
1597 	}
1598 }
1599 
1600 /*
1601  * Clear the dirty status of all the SPTEs mapping GFNs [start, end).  If
1602  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1603  * If AD bits are not enabled, this will require clearing the writable bit on
1604  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1605  * be flushed.
1606  */
1607 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1608 			   gfn_t start, gfn_t end)
1609 {
1610 	struct tdp_iter iter;
1611 	u64 new_spte;
1612 	bool spte_set = false;
1613 
1614 	rcu_read_lock();
1615 
1616 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
1617 retry:
1618 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1619 			continue;
1620 
1621 		if (!is_shadow_present_pte(iter.old_spte))
1622 			continue;
1623 
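		/*
		 * SPTEs that don't use A/D bits track dirtiness via the
		 * writable bit, so write-protect them; otherwise just clear
		 * the dirty bit.
		 */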
1624 		if (spte_ad_need_write_protect(iter.old_spte)) {
1625 			if (is_writable_pte(iter.old_spte))
1626 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1627 			else
1628 				continue;
1629 		} else {
1630 			if (iter.old_spte & shadow_dirty_mask)
1631 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1632 			else
1633 				continue;
1634 		}
1635 
1636 		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1637 			goto retry;
1638 
1639 		spte_set = true;
1640 	}
1641 
1642 	rcu_read_unlock();
1643 	return spte_set;
1644 }
1645 
1646 /*
1647  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1648  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1649  * If AD bits are not enabled, this will require clearing the writable bit on
1650  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1651  * be flushed.
1652  */
1653 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1654 				  const struct kvm_memory_slot *slot)
1655 {
1656 	struct kvm_mmu_page *root;
1657 	bool spte_set = false;
1658 
1659 	lockdep_assert_held_read(&kvm->mmu_lock);
1660 
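	/*
	 * Running under the read lock is safe here: dirty state is cleared
	 * with atomic SPTE updates and the walk yields when needed.
	 */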
1661 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1662 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1663 				slot->base_gfn + slot->npages);
1664 
1665 	return spte_set;
1666 }
1667 
1668 /*
1669  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1670  * set in mask, starting at gfn. The given memslot is expected to contain all
1671  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1672  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1673  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1674  */
1675 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1676 				  gfn_t gfn, unsigned long mask, bool wrprot)
1677 {
1678 	struct tdp_iter iter;
1679 	u64 new_spte;
1680 
1681 	rcu_read_lock();
1682 
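	/*
	 * The 64-bit mask can only cover GFNs in [gfn, gfn + BITS_PER_LONG);
	 * bound the walk accordingly and stop once every set bit is handled.
	 */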
1683 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1684 				    gfn + BITS_PER_LONG) {
1685 		if (!mask)
1686 			break;
1687 
1688 		if (iter.level > PG_LEVEL_4K ||
1689 		    !(mask & (1UL << (iter.gfn - gfn))))
1690 			continue;
1691 
1692 		mask &= ~(1UL << (iter.gfn - gfn));
1693 
1694 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1695 			if (is_writable_pte(iter.old_spte))
1696 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1697 			else
1698 				continue;
1699 		} else {
1700 			if (iter.old_spte & shadow_dirty_mask)
1701 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1702 			else
1703 				continue;
1704 		}
1705 
1706 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1707 	}
1708 
1709 	rcu_read_unlock();
1710 }
1711 
1712 /*
1713  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1714  * set in mask, starting at gfn. The given memslot is expected to contain all
1715  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1716  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1717  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1718  */
1719 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1720 				       struct kvm_memory_slot *slot,
1721 				       gfn_t gfn, unsigned long mask,
1722 				       bool wrprot)
1723 {
1724 	struct kvm_mmu_page *root;
1725 
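	/*
	 * mmu_lock is held for write, so roots cannot be freed out from under
	 * us and the non-yield-safe root iterator is sufficient.
	 */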
1726 	lockdep_assert_held_write(&kvm->mmu_lock);
1727 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1728 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1729 }
1730 
1731 static void zap_collapsible_spte_range(struct kvm *kvm,
1732 				       struct kvm_mmu_page *root,
1733 				       const struct kvm_memory_slot *slot)
1734 {
1735 	gfn_t start = slot->base_gfn;
1736 	gfn_t end = start + slot->npages;
1737 	struct tdp_iter iter;
1738 	int max_mapping_level;
1739 
1740 	rcu_read_lock();
1741 
1742 	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1743 retry:
1744 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1745 			continue;
1746 
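		/*
		 * Skip non-present SPTEs and levels that can never be mapped
		 * by a huge page; the iterator steps down through present
		 * non-leaf SPTEs on its own.
		 */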
1747 		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1748 		    !is_shadow_present_pte(iter.old_spte))
1749 			continue;
1750 
1751 		/*
1752 		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1753 		 * a large page size, then its parent would have been zapped
1754 		 * instead of stepping down.
1755 		 */
1756 		if (is_last_spte(iter.old_spte, iter.level))
1757 			continue;
1758 
1759 		/*
1760 		 * If iter.gfn resides outside of the slot, i.e. the page for
1761 		 * the current level overlaps but is not contained by the slot,
1762 		 * then the SPTE can't be made huge.  More importantly, trying
1763 		 * to query that info from slot->arch.lpage_info will cause an
1764 		 * out-of-bounds access.
1765 		 */
1766 		if (iter.gfn < start || iter.gfn >= end)
1767 			continue;
1768 
1769 		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1770 							      iter.gfn, PG_LEVEL_NUM);
1771 		if (max_mapping_level < iter.level)
1772 			continue;
1773 
1774 		/* Note, a successful atomic zap also does a remote TLB flush. */
1775 		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1776 			goto retry;
1777 	}
1778 
1779 	rcu_read_unlock();
1780 }
1781 
1782 /*
1783  * Zap non-leaf SPTEs (and free their associated page tables) which could
1784  * be replaced by huge pages, for GFNs within the slot.
1785  */
1786 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1787 				       const struct kvm_memory_slot *slot)
1788 {
1789 	struct kvm_mmu_page *root;
1790 
1791 	lockdep_assert_held_read(&kvm->mmu_lock);
1792 
1793 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1794 		zap_collapsible_spte_range(kvm, root, slot);
1795 }
1796 
1797 /*
1798  * Removes write access on the last level SPTE mapping this GFN and unsets the
1799  * MMU-writable bit to ensure future writes continue to be intercepted.
1800  * Returns true if an SPTE was set and a TLB flush is needed.
1801  */
1802 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1803 			      gfn_t gfn, int min_level)
1804 {
1805 	struct tdp_iter iter;
1806 	u64 new_spte;
1807 	bool spte_set = false;
1808 
1809 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1810 
1811 	rcu_read_lock();
1812 
1813 	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1814 		if (!is_shadow_present_pte(iter.old_spte) ||
1815 		    !is_last_spte(iter.old_spte, iter.level))
1816 			continue;
1817 
1818 		new_spte = iter.old_spte &
1819 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1820 
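		/* Nothing to do if the SPTE is already fully write-protected. */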
1821 		if (new_spte == iter.old_spte)
1822 			break;
1823 
1824 		tdp_mmu_set_spte(kvm, &iter, new_spte);
1825 		spte_set = true;
1826 	}
1827 
1828 	rcu_read_unlock();
1829 
1830 	return spte_set;
1831 }
1832 
1833 /*
1834  * Removes write access on the last level SPTE mapping this GFN and unsets the
1835  * MMU-writable bit to ensure future writes continue to be intercepted.
1836  * Returns true if an SPTE was set and a TLB flush is needed.
1837  */
1838 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1839 				   struct kvm_memory_slot *slot, gfn_t gfn,
1840 				   int min_level)
1841 {
1842 	struct kvm_mmu_page *root;
1843 	bool spte_set = false;
1844 
1845 	lockdep_assert_held_write(&kvm->mmu_lock);
1846 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1847 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1848 
1849 	return spte_set;
1850 }
1851 
1852 /*
1853  * Return the level of the lowest level SPTE added to sptes.
1854  * That SPTE may be non-present.
1855  *
1856  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1857  */
1858 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1859 			 int *root_level)
1860 {
1861 	struct tdp_iter iter;
1862 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1863 	gfn_t gfn = addr >> PAGE_SHIFT;
1864 	int leaf = -1;
1865 
1866 	*root_level = vcpu->arch.mmu->root_role.level;
1867 
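	/*
	 * Record the SPTE at each level of the walk; the last entry written
	 * is the lowest level reached, which may hold a non-present SPTE.
	 */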
1868 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1869 		leaf = iter.level;
1870 		sptes[leaf] = iter.old_spte;
1871 	}
1872 
1873 	return leaf;
1874 }
1875 
1876 /*
1877  * Returns the last level spte pointer of the shadow page walk for the given
1878  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1879  * walk could be performed, returns NULL and *spte does not contain valid data.
1880  *
1881  * Contract:
1882  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1883  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1884  *
1885  * WARNING: This function is only intended to be called during fast_page_fault.
1886  */
1887 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1888 					u64 *spte)
1889 {
1890 	struct tdp_iter iter;
1891 	struct kvm_mmu *mmu = vcpu->arch.mmu;
1892 	gfn_t gfn = addr >> PAGE_SHIFT;
1893 	tdp_ptep_t sptep = NULL;
1894 
1895 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1896 		*spte = iter.old_spte;
1897 		sptep = iter.sptep;
1898 	}
1899 
1900 	/*
1901 	 * Perform the rcu_dereference to get the raw spte pointer value since
1902 	 * we are passing it up to fast_page_fault, which is shared with the
1903 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1904 	 * annotation.
1905 	 *
1906 	 * This is safe since fast_page_fault obeys the contracts of this
1907 	 * function as well as all TDP MMU contracts around modifying SPTEs
1908 	 * outside of mmu_lock.
1909 	 */
1910 	return rcu_dereference(sptep);
1911 }
1912