// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
                                                             bool shared)
{
        if (shared)
                lockdep_assert_held_read(&kvm->mmu_lock);
        else
                lockdep_assert_held_write(&kvm->mmu_lock);

        return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        /*
         * Invalidate all roots, which besides the obvious, schedules all roots
         * for zapping and thus puts the TDP MMU's reference to each root, i.e.
         * ultimately frees all roots.
         */
        kvm_tdp_mmu_invalidate_all_roots(kvm);
        kvm_tdp_mmu_zap_invalidated_roots(kvm);

        WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

        /*
         * Ensure that all the outstanding RCU callbacks to free shadow pages
         * can run before the VM is torn down. Putting the last reference to
         * zapped roots will create new callbacks.
         */
        rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
        free_page((unsigned long)sp->spt);
        kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
        struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
                                               rcu_head);

        tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
{
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);

        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
                return;

        /*
         * The TDP MMU itself holds a reference to each root until the root is
         * explicitly invalidated, i.e. the final reference should never be
         * put for a valid root.
         */
        KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL). A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
                                              bool shared, bool only_valid)
{
        struct kvm_mmu_page *next_root;

        rcu_read_lock();

        if (prev_root)
                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                  &prev_root->link,
                                                  typeof(*prev_root), link);
        else
                next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                   typeof(*next_root), link);

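        /*
         * Skip roots that are already being freed: kvm_tdp_mmu_get_root()
         * fails if the root's refcount has already dropped to zero, in which
         * case simply move on to the next root on the list.
         */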
        while (next_root) {
                if ((!only_valid || !next_root->role.invalid) &&
                    kvm_tdp_mmu_get_root(next_root))
                        break;

                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                &next_root->link, typeof(*next_root), link);
        }

        rcu_read_unlock();

        if (prev_root)
                kvm_tdp_mmu_put_root(kvm, prev_root, shared);

        return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
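/*
 * The empty "if { } else" construct below filters out roots belonging to
 * other address spaces while still allowing the macro to be used as a
 * normal, single-statement for-loop by its callers.
 */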
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
        for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);       \
             _root;                                                             \
             _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))      \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&          \
                    kvm_mmu_page_as_id(_root) != _as_id) {                      \
                } else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)    \
        __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)                  \
        for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);             \
             _root;                                                             \
             _root = tdp_mmu_next_root(_kvm, _root, _shared, false))            \
                if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {         \
                } else
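
/*
 * Example usage, a sketch mirroring kvm_tdp_mmu_zap_leafs() below:
 *
 *      for_each_tdp_mmu_root_yield_safe(kvm, root, false)
 *              flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 */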

/*
 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                      \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&    \
                    kvm_mmu_page_as_id(_root) != _as_id) {              \
                } else

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *sp;

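        /*
         * Note, both allocations come from per-vCPU caches that are topped up
         * before a page fault is serviced, so they are expected to succeed
         * here; see kvm_mmu_memory_cache_alloc().
         */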
        sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

        return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
                            gfn_t gfn, union kvm_mmu_page_role role)
{
        INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

        sp->role = role;
        sp->gfn = gfn;
        sp->ptep = sptep;
        sp->tdp_mmu_page = true;

        trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
                                  struct tdp_iter *iter)
{
        struct kvm_mmu_page *parent_sp;
        union kvm_mmu_page_role role;

        parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

        role = parent_sp->role;
        role.level--;

        tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
        union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        lockdep_assert_held_write(&kvm->mmu_lock);

        /*
         * Check for an existing root before allocating a new one. Note, the
         * role check prevents consuming an invalid root.
         */
        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word &&
                    kvm_tdp_mmu_get_root(root))
                        goto out;
        }

        root = tdp_mmu_alloc_sp(vcpu);
        tdp_mmu_init_sp(root, NULL, 0, role);

        /*
         * TDP MMU roots are kept until they are explicitly invalidated, either
         * by a memslot update or by the destruction of the VM. Initialize the
         * refcount to two; one reference for the vCPU, and one reference for
         * the TDP MMU itself, which is held until the root is invalidated and
         * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
         */
        refcount_set(&root->tdp_mmu_root_count, 2);

        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
        return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared);

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        kvm_account_pgtable_pages((void *)sp->spt, +1);
        atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        kvm_account_pgtable_pages((void *)sp->spt, -1);
        atomic64_dec(&kvm->arch.tdp_mmu_pages);
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *          the MMU lock and the operation must synchronize with other
 *          threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
                              bool shared)
{
        tdp_unaccount_mmu_page(kvm, sp);

        if (!sp->nx_huge_page_disallowed)
                return;

        if (shared)
                spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        else
                lockdep_assert_held_write(&kvm->mmu_lock);

        sp->nx_huge_page_disallowed = false;
        untrack_possible_nx_huge_page(kvm, sp);

        if (shared)
                spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *          of the MMU lock and the operation must synchronize with other
 *          threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
        int i;

        trace_kvm_mmu_prepare_zap_page(sp);

        tdp_mmu_unlink_sp(kvm, sp, shared);

        for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
                tdp_ptep_t sptep = pt + i;
                gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
                u64 old_spte;

                if (shared) {
                        /*
                         * Set the SPTE to a nonpresent value that other
                         * threads will not overwrite. If the SPTE was
                         * already marked as removed, then another thread
                         * handling a page fault could overwrite it, so
                         * retry setting the SPTE until it is changed from
                         * some other value to the removed SPTE value.
                         */
                        for (;;) {
                                old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
                                if (!is_removed_spte(old_spte))
                                        break;
                                cpu_relax();
                        }
                } else {
                        /*
                         * If the SPTE is not MMU-present, there is no backing
                         * page associated with the SPTE and so no side effects
                         * that need to be recorded, and exclusive ownership of
                         * mmu_lock ensures the SPTE can't be made present.
                         * Note, zapping MMIO SPTEs is also unnecessary as they
                         * are guarded by the memslots generation, not by being
                         * unreachable.
                         */
                        old_spte = kvm_tdp_mmu_read_spte(sptep);
                        if (!is_shadow_present_pte(old_spte))
                                continue;

                        /*
                         * Use the common helper instead of a raw WRITE_ONCE as
                         * the SPTE needs to be updated atomically if it can be
                         * modified by a different vCPU outside of mmu_lock.
                         * Even though the parent SPTE is !PRESENT, the TLB
                         * hasn't yet been flushed, and both Intel and AMD
                         * document that A/D assists can use upper-level PxE
                         * entries that are cached in the TLB, i.e. the CPU can
                         * still access the page and mark it dirty.
                         *
                         * No retry is needed in the atomic update path as the
                         * sole concern is dropping a Dirty bit, i.e. no other
                         * task can zap/remove the SPTE as mmu_lock is held for
                         * write. Marking the SPTE as a removed SPTE is not
                         * strictly necessary for the same reason, but using
                         * the remove SPTE value keeps the shared/exclusive
                         * paths consistent and allows the handle_changed_spte()
                         * call below to hardcode the new value to REMOVED_SPTE.
                         *
                         * Note, even though dropping a Dirty bit is the only
                         * scenario where a non-atomic update could result in a
                         * functional bug, simply checking the Dirty bit isn't
                         * sufficient as a fast page fault could read the upper
                         * level SPTE before it is zapped, and then make this
                         * target SPTE writable, resume the guest, and set the
                         * Dirty bit between reading the SPTE above and writing
                         * it here.
                         */
                        old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
                                                          REMOVED_SPTE, level);
                }
                handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
                                    old_spte, REMOVED_SPTE, level, shared);
        }

        call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *          the MMU lock and the operation must synchronize with other
 *          threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE. Note,
 * dirty logging updates are handled in common code, not here (see make_spte()
 * and fast_pf_fix_direct_spte()).
 */
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared)
{
        bool was_present = is_shadow_present_pte(old_spte);
        bool is_present = is_shadow_present_pte(new_spte);
        bool was_leaf = was_present && is_last_spte(old_spte, level);
        bool is_leaf = is_present && is_last_spte(new_spte, level);
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
        WARN_ON_ONCE(level < PG_LEVEL_4K);
        WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

        /*
         * If this warning were to trigger it would indicate that there was a
         * missing MMU notifier or a race with some notifier handler.
         * A present, leaf SPTE should never be directly replaced with another
         * present leaf SPTE pointing to a different PFN. A notifier handler
         * should be zapping the SPTE before the main MM's page table is
         * changed, or the SPTE should be zeroed, and the TLBs flushed by the
         * thread before replacement.
         */
        if (was_leaf && is_leaf && pfn_changed) {
                pr_err("Invalid SPTE change: cannot replace a present leaf\n"
                       "SPTE with another present leaf SPTE mapping a\n"
                       "different PFN!\n"
                       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                       as_id, gfn, old_spte, new_spte, level);

                /*
                 * Crash the host to prevent error propagation and guest data
                 * corruption.
                 */
                BUG();
        }

        if (old_spte == new_spte)
                return;

        trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

        if (is_leaf)
                check_spte_writable_invariants(new_spte);

        /*
         * The only time a SPTE should be changed from a non-present to a
         * non-present state is when an MMIO entry is installed/modified/
         * removed. In that case, there is nothing to do here.
         */
        if (!was_present && !is_present) {
                /*
                 * If this change does not involve a MMIO SPTE or removed SPTE,
                 * it is unexpected. Log the change, though it should not
                 * impact the guest since both the former and current SPTEs
                 * are nonpresent.
                 */
                if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
                                 !is_mmio_spte(new_spte) &&
                                 !is_removed_spte(new_spte)))
                        pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
                               "should not be replaced with another,\n"
                               "different nonpresent SPTE, unless one or both\n"
                               "are MMIO SPTEs, or the new SPTE is\n"
                               "a temporary removed SPTE.\n"
                               "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                               as_id, gfn, old_spte, new_spte, level);
                return;
        }

        if (is_leaf != was_leaf)
                kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

        if (was_leaf && is_dirty_spte(old_spte) &&
            (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));

        /*
         * Recursively handle child PTs if the change removed a subtree from
         * the paging structure. Note the WARN on the PFN changing without the
         * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
         * pages are kernel allocations and should never be migrated.
         */
        if (was_present && !was_leaf &&
            (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
                handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);

        if (was_leaf && is_accessed_spte(old_spte) &&
            (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping. Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
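 *
 * A typical caller retries under mmu_lock held for read, e.g. a sketch
 * mirroring wrprot_gfn_range() below:
 *
 *      retry:
 *              if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *                      goto retry;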
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                          struct tdp_iter *iter,
                                          u64 new_spte)
{
        u64 *sptep = rcu_dereference(iter->sptep);

        /*
         * The caller is responsible for ensuring the old SPTE is not a REMOVED
         * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
         * and pre-checking before inserting a new SPTE is advantageous as it
         * avoids unnecessary work.
         */
        WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));

        lockdep_assert_held_read(&kvm->mmu_lock);

        /*
         * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
         * does not hold the mmu_lock. On failure, i.e. if a different logical
         * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
         * the current value, so the caller operates on fresh data, e.g. if it
         * retries tdp_mmu_set_spte_atomic().
         */
        if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
                return -EBUSY;

        handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
                            new_spte, iter->level, true);

        return 0;
}

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
                                          struct tdp_iter *iter)
{
        int ret;

        /*
         * Freeze the SPTE by setting it to a special,
         * non-present value. This will stop other threads from
         * immediately installing a present entry in its place
         * before the TLBs are flushed.
         */
        ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
        if (ret)
                return ret;

        kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);

        /*
         * No other thread can overwrite the removed SPTE as they must either
         * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
         * overwrite the special removed SPTE value. No bookkeeping is needed
         * here since the SPTE is going from non-present to non-present. Use
         * the raw write helper to avoid an unnecessary check on volatile bits.
         */
        __kvm_tdp_mmu_write_spte(iter->sptep, 0);

        return 0;
}


/*
 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:              KVM instance
 * @as_id:            Address space ID, i.e. regular vs. SMM
 * @sptep:            Pointer to the SPTE
 * @old_spte:         The current value of the SPTE
 * @new_spte:         The new value that will be set for the SPTE
 * @gfn:              The base GFN that was (or will be) mapped by the SPTE
 * @level:            The level _containing_ the SPTE (its parent PT's level)
 *
 * Returns the old SPTE value, which _may_ be different from @old_spte if the
 * SPTE had volatile bits.
 */
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
                            u64 old_spte, u64 new_spte, gfn_t gfn, int level)
{
        lockdep_assert_held_write(&kvm->mmu_lock);

        /*
         * No thread should be using this function to set SPTEs to or from the
         * temporary removed SPTE value.
         * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
         * should be used. If operating under the MMU lock in write mode, the
         * use of the removed SPTE should not be necessary.
         */
        WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));

        old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

        handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
        return old_spte;
}

static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                         u64 new_spte)
{
        WARN_ON_ONCE(iter->yielded);
        iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
                                          iter->old_spte, new_spte,
                                          iter->gfn, iter->level);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
        for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
        tdp_root_for_each_pte(_iter, _root, _start, _end)               \
                if (!is_shadow_present_pte(_iter.old_spte) ||           \
                    !is_last_spte(_iter.old_spte, _iter.level))         \
                        continue;                                       \
                else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)         \
        for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
                                                          struct tdp_iter *iter,
                                                          bool flush, bool shared)
{
        WARN_ON_ONCE(iter->yielded);

        /* Ensure forward progress has been made before yielding. */
        if (iter->next_last_level_gfn == iter->yielded_gfn)
                return false;

        if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                if (flush)
                        kvm_flush_remote_tlbs(kvm);

                rcu_read_unlock();

                if (shared)
                        cond_resched_rwlock_read(&kvm->mmu_lock);
                else
                        cond_resched_rwlock_write(&kvm->mmu_lock);

                rcu_read_lock();

                WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);

                iter->yielded = true;
        }

        return iter->yielded;
}

static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
        /*
         * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
         * a gpa range that would exceed the max gfn, and KVM does not create
         * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
         * the slow emulation path every time.
         */
        return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
                               bool shared, int zap_level)
{
        struct tdp_iter iter;

        gfn_t end = tdp_mmu_max_gfn_exclusive();
        gfn_t start = 0;

        for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
                        continue;

                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                if (iter.level > zap_level)
                        continue;

                if (!shared)
                        tdp_mmu_iter_set_spte(kvm, &iter, 0);
                else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
                        goto retry;
        }
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
                             bool shared)
{
        /*
         * The root must have an elevated refcount so that it's reachable via
         * mmu_notifier callbacks, which allows this path to yield and drop
         * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
         * must drop all references to relevant pages prior to completing the
         * callback. Dropping mmu_lock with an unreachable root would result
         * in zapping SPTEs after a relevant mmu_notifier callback completes
         * and lead to use-after-free as zapping a SPTE triggers "writeback" of
         * dirty accessed bits to the SPTE's associated struct page.
         */
        WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

        kvm_lockdep_assert_mmu_lock_held(kvm, shared);

        rcu_read_lock();

        /*
         * To avoid RCU stalls due to recursively removing huge swaths of SPs,
         * split the zap into two passes. On the first pass, zap at the 1gb
         * level, and then zap top-level SPs on the second pass. "1gb" is not
         * arbitrary, as KVM must be able to zap a 1gb shadow page without
         * inducing a stall to allow in-place replacement with a 1gb hugepage.
         *
         * Because zapping a SP recurses on its children, stepping down to
         * PG_LEVEL_4K in the iterator itself is unnecessary.
         */
        __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
        __tdp_mmu_zap_root(kvm, root, shared, root->role.level);

        rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        u64 old_spte;

        /*
         * This helper intentionally doesn't allow zapping a root shadow page,
         * which doesn't have a parent page table and thus no associated entry.
         */
        if (WARN_ON_ONCE(!sp->ptep))
                return false;

        old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
        if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
                return false;

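        /*
         * Note, the level passed to tdp_mmu_set_spte() is the level of the
         * parent PT containing the SPTE, hence role.level + 1 for the SP
         * being zapped.
         */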
        tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
                         sp->gfn, sp->role.level + 1);

        return true;
}

/*
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
                              gfn_t start, gfn_t end, bool can_yield, bool flush)
{
        struct tdp_iter iter;

        end = min(end, tdp_mmu_max_gfn_exclusive());

        lockdep_assert_held_write(&kvm->mmu_lock);

        rcu_read_lock();

        for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
                        flush = false;
                        continue;
                }

                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                tdp_mmu_iter_set_spte(kvm, &iter, 0);
                flush = true;
        }

        rcu_read_unlock();

        /*
         * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
         * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
         */
        return flush;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
 * more SPTEs were zapped since the MMU lock was last acquired.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
        struct kvm_mmu_page *root;

        for_each_tdp_mmu_root_yield_safe(kvm, root, false)
                flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);

        return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        /*
         * Zap all roots, including invalid roots, as all SPTEs must be dropped
         * before returning to the caller. Zap directly even if the root is
         * also being zapped by a worker. Walking zapped top-level SPTEs isn't
         * all that expensive and mmu_lock is already held, which means the
         * worker has yielded, i.e. flushing the work instead of zapping here
         * isn't guaranteed to be any faster.
         *
         * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
         * is being destroyed or the userspace VMM has exited. In both cases,
         * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
         */
        for_each_tdp_mmu_root_yield_safe(kvm, root, false)
                tdp_mmu_zap_root(kvm, root, false);
}

/*
 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
 * zap" completes.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        read_lock(&kvm->mmu_lock);

        for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
                if (!root->tdp_mmu_scheduled_root_to_zap)
                        continue;

                root->tdp_mmu_scheduled_root_to_zap = false;
                KVM_BUG_ON(!root->role.invalid, kvm);

                /*
                 * A TLB flush is not necessary as KVM performs a local TLB
                 * flush when allocating a new root (see kvm_mmu_load()), and
                 * when migrating a vCPU to a different pCPU. Note, the local
                 * TLB flush on reuse also invalidates paging-structure-cache
                 * entries, i.e. TLB entries for intermediate paging structures,
                 * that may be zapped, as such entries are associated with the
                 * ASID on both VMX and SVM.
                 */
                tdp_mmu_zap_root(kvm, root, true);

                /*
                 * The reference needs to be put *after* zapping the root, as
                 * the root must be reachable by mmu_notifiers while it's being
                 * zapped.
                 */
                kvm_tdp_mmu_put_root(kvm, root, true);
        }

        read_unlock(&kvm->mmu_lock);
}

/*
 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
 * is about to be zapped, e.g. in response to a memslots update. The actual
 * zapping is done separately so that it happens with mmu_lock held for read,
 * whereas invalidating roots must be done with mmu_lock held for write (unless
 * the VM is being destroyed).
 *
 * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
 * See kvm_tdp_mmu_get_vcpu_root_hpa().
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        /*
         * mmu_lock must be held for write to ensure that a root doesn't become
         * invalid while there are active readers (invalidating a root while
         * there are active readers may or may not be problematic in practice,
         * but it's uncharted territory and not supported).
         *
         * Waive the assertion if there are no users of @kvm, i.e. the VM is
         * being destroyed after all references have been put, or if no vCPUs
         * have been created (which means there are no roots), i.e. the VM is
         * being destroyed in an error path of KVM_CREATE_VM.
         */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            refcount_read(&kvm->users_count) && kvm->created_vcpus)
                lockdep_assert_held_write(&kvm->mmu_lock);

        /*
         * As above, mmu_lock isn't held when destroying the VM! There can't
         * be other references to @kvm, i.e. nothing else can invalidate roots
         * or get/put references to roots.
         */
        list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
                /*
                 * Note, invalid roots can outlive a memslot update! Invalid
                 * roots must be *zapped* before the memslot update completes,
                 * but a different task can acquire a reference and keep the
                 * root alive after it's been zapped.
                 */
                if (!root->role.invalid) {
                        root->tdp_mmu_scheduled_root_to_zap = true;
                        root->role.invalid = true;
                }
        }
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
                                           struct kvm_page_fault *fault,
                                           struct tdp_iter *iter)
{
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
        u64 new_spte;
        int ret = RET_PF_FIXED;
        bool wrprot = false;

        if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
                return RET_PF_RETRY;

        if (unlikely(!fault->slot))
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
        else
                wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
                                   fault->pfn, iter->old_spte, fault->prefetch, true,
                                   fault->map_writable, &new_spte);

        if (new_spte == iter->old_spte)
                ret = RET_PF_SPURIOUS;
        else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
                return RET_PF_RETRY;
        else if (is_shadow_present_pte(iter->old_spte) &&
                 !is_last_spte(iter->old_spte, iter->level))
                kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);

        /*
         * If the page fault was caused by a write but the page is write
         * protected, emulation is needed. If the emulation was skipped,
         * the vCPU would have the same fault again.
         */
        if (wrprot) {
                if (fault->write)
                        ret = RET_PF_EMULATE;
        }

        /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
        if (unlikely(is_mmio_spte(new_spte))) {
                vcpu->stat.pf_mmio_spte_created++;
                trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
                                     new_spte);
                ret = RET_PF_EMULATE;
        } else {
                trace_kvm_mmu_set_spte(iter->level, iter->gfn,
                                       rcu_dereference(iter->sptep));
        }

        return ret;
}

/*
 * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
 * provided page table.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @sp: The new TDP page table to install.
 * @shared: This operation is running under the MMU lock in read mode.
 *
 * Returns: 0 if the new page table was installed. Non-0 if the page table
 *          could not be installed (e.g. the atomic compare-exchange failed).
 */
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
                           struct kvm_mmu_page *sp, bool shared)
{
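        /*
         * Note, make_nonleaf_spte() takes an "ad_disabled" flag, hence the
         * inverted kvm_ad_enabled().
         */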
        u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
        int ret = 0;

        if (shared) {
                ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
                if (ret)
                        return ret;
        } else {
                tdp_mmu_iter_set_spte(kvm, iter, spte);
        }

        tdp_account_mmu_page(kvm, sp);

        return 0;
}

static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
                                   struct kvm_mmu_page *sp, bool shared);

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        struct kvm *kvm = vcpu->kvm;
        struct tdp_iter iter;
        struct kvm_mmu_page *sp;
        int ret = RET_PF_RETRY;

        kvm_mmu_hugepage_adjust(vcpu, fault);

        trace_kvm_mmu_spte_requested(fault);

        rcu_read_lock();

        tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
                int r;

                if (fault->nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

                /*
                 * If SPTE has been frozen by another thread, just give up and
                 * retry, avoiding unnecessary page table allocation and free.
                 */
                if (is_removed_spte(iter.old_spte))
                        goto retry;

                if (iter.level == fault->goal_level)
                        goto map_target_level;

                /* Step down into the lower level page table if it exists. */
                if (is_shadow_present_pte(iter.old_spte) &&
                    !is_large_pte(iter.old_spte))
                        continue;

                /*
                 * The SPTE is either non-present or points to a huge page that
                 * needs to be split.
                 */
                sp = tdp_mmu_alloc_sp(vcpu);
                tdp_mmu_init_child_sp(sp, &iter);

                sp->nx_huge_page_disallowed = fault->huge_page_disallowed;

                if (is_shadow_present_pte(iter.old_spte))
                        r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
                else
                        r = tdp_mmu_link_sp(kvm, &iter, sp, true);

                /*
                 * Force the guest to retry if installing an upper level SPTE
                 * failed, e.g. because a different task modified the SPTE.
                 */
                if (r) {
                        tdp_mmu_free_sp(sp);
                        goto retry;
                }

                if (fault->huge_page_disallowed &&
                    fault->req_level >= iter.level) {
                        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
                        if (sp->nx_huge_page_disallowed)
                                track_possible_nx_huge_page(kvm, sp);
                        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
                }
        }

        /*
         * The walk aborted before reaching the target level, e.g. because the
         * iterator detected an upper level SPTE was frozen during traversal.
         */
        WARN_ON_ONCE(iter.level == fault->goal_level);
        goto retry;

map_target_level:
        ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

retry:
        rcu_read_unlock();
        return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush)
{
        struct kvm_mmu_page *root;

        __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
                flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
                                          range->may_block, flush);

        return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
                              struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
                                                   struct kvm_gfn_range *range,
                                                   tdp_handler_t handler)
{
        struct kvm_mmu_page *root;
        struct tdp_iter iter;
        bool ret = false;

        /*
         * Don't support rescheduling; none of the MMU notifiers that funnel
         * into this helper allow blocking, so it'd be dead, wasteful code.
         */
        for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
                rcu_read_lock();

                tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
                        ret |= handler(kvm, &iter, range);

                rcu_read_unlock();
        }

        return ret;
}

/*
 * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return
 * non-zero if any of the GFNs in the range have been accessed.
 *
 * No need to mark the corresponding PFN as accessed as this call is coming
 * from the clear_young() or clear_flush_young() notifier, which uses the
 * return value to determine if the page has been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
                          struct kvm_gfn_range *range)
{
        u64 new_spte;

        /* If we have a non-accessed entry we don't need to change the pte. */
        if (!is_accessed_spte(iter->old_spte))
                return false;

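        /*
         * If A/D bits are enabled, clear only the Accessed bit (atomically,
         * as hardware can set it at any time); otherwise mark the SPTE for
         * access tracking, preserving the dirty status first.
         */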
        if (spte_ad_enabled(iter->old_spte)) {
                iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
                                                         iter->old_spte,
                                                         shadow_accessed_mask,
                                                         iter->level);
                new_spte = iter->old_spte & ~shadow_accessed_mask;
        } else {
                /*
                 * Capture the dirty status of the page, so that it doesn't get
                 * lost when the SPTE is marked for access tracking.
                 */
                if (is_writable_pte(iter->old_spte))
                        kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));

                new_spte = mark_spte_for_access_track(iter->old_spte);
                iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
                                                        iter->old_spte, new_spte,
                                                        iter->level);
        }

        trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
                                       iter->old_spte, new_spte);
        return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
                         struct kvm_gfn_range *range)
{
        return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
                         struct kvm_gfn_range *range)
{
        u64 new_spte;

        /* Huge pages aren't expected to be modified without first being zapped. */
        WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);

        if (iter->level != PG_LEVEL_4K ||
            !is_shadow_present_pte(iter->old_spte))
                return false;

        /*
         * Note, when changing a read-only SPTE, it's not strictly necessary to
         * zero the SPTE before setting the new PFN, but doing so preserves the
         * invariant that the PFN of a present leaf SPTE can never change.
         * See handle_changed_spte().
         */
        tdp_mmu_iter_set_spte(kvm, iter, 0);

        if (!pte_write(range->arg.pte)) {
                new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
                                                                  pte_pfn(range->arg.pte));

                tdp_mmu_iter_set_spte(kvm, iter, new_spte);
        }

        return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /*
         * No need to handle the remote TLB flush under RCU protection, the
         * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
         * shadow page. See the WARN on pfn_changed in handle_changed_spte().
         */
        return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        rcu_read_lock();

        BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

        for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;

                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level) ||
                    !(iter.old_spte & PT_WRITABLE_MASK))
                        continue;

                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

                if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
                        goto retry;

                spte_set = true;
        }

        rcu_read_unlock();
        return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
                             const struct kvm_memory_slot *slot, int min_level)
{
        struct kvm_mmu_page *root;
        bool spte_set = false;

        lockdep_assert_held_read(&kvm->mmu_lock);

        for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                                             slot->base_gfn + slot->npages, min_level);

        return spte_set;
}

static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
{
        struct kvm_mmu_page *sp;

        gfp |= __GFP_ZERO;

        sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
        if (!sp)
                return NULL;

        sp->spt = (void *)__get_free_page(gfp);
        if (!sp->spt) {
                kmem_cache_free(mmu_page_header_cache, sp);
                return NULL;
        }

        return sp;
}

static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
                                                       struct tdp_iter *iter,
                                                       bool shared)
{
        struct kvm_mmu_page *sp;

        /*
         * Since we are allocating while under the MMU lock, we have to be
         * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
         * reclaim and to avoid making any filesystem callbacks (which can end
         * up invoking KVM MMU notifiers, resulting in a deadlock).
         *
         * If this allocation fails we drop the lock and retry with reclaim
         * allowed.
         */
        sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
        if (sp)
                return sp;

        rcu_read_unlock();

        if (shared)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);

        iter->yielded = true;
        sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);

        if (shared)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);

        rcu_read_lock();

        return sp;
}

/* Note, the caller is responsible for initializing @sp. */
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
                                   struct kvm_mmu_page *sp, bool shared)
{
        const u64 huge_spte = iter->old_spte;
        const int level = iter->level;
        int ret, i;

        /*
         * No need for atomics when writing to sp->spt since the page table has
         * not been linked in yet and thus is not reachable from any other CPU.
         */
        for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
                sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);

        /*
         * Replace the huge spte with a pointer to the populated lower level
         * page table. Since we are making this change without a TLB flush, vCPUs
         * will see a mix of the split mappings and the original huge mapping,
         * depending on what's currently in their TLB. This is fine from a
         * correctness standpoint since the translation will be the same either
         * way.
         */
        ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
        if (ret)
                goto out;

        /*
         * tdp_mmu_link_sp() will handle subtracting the huge page we
         * are overwriting from the page stats. But we have to manually update
         * the page stats with the new present child pages.
         */
        kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);

out:
        trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
        return ret;
}

static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
                                         struct kvm_mmu_page *root,
                                         gfn_t start, gfn_t end,
                                         int target_level, bool shared)
{
        struct kvm_mmu_page *sp = NULL;
        struct tdp_iter iter;
        int ret = 0;

        rcu_read_lock();

        /*
         * Traverse the page table splitting all huge pages above the target
         * level into one lower level. For example, if we encounter a 1GB page
         * we split it into 512 2MB pages.
         *
         * Since the TDP iterator uses a pre-order traversal, we are guaranteed
         * to visit an SPTE before ever visiting its children, which means we
         * will correctly recursively split huge pages that are more than one
         * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
         * and then splitting each of those to 512 4KB pages).
         */
        for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
                        continue;

                if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
                        continue;

                if (!sp) {
                        sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
                        if (!sp) {
                                ret = -ENOMEM;
                                trace_kvm_mmu_split_huge_page(iter.gfn,
                                                              iter.old_spte,
                                                              iter.level, ret);
                                break;
                        }

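                        /*
                         * If the allocation dropped and reacquired mmu_lock,
                         * iter.yielded was set; restart the loop so that the
                         * walk resumes from a consistent state.
                         */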
                        if (iter.yielded)
                                continue;
                }

                tdp_mmu_init_child_sp(sp, &iter);

                if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
                        goto retry;

                sp = NULL;
        }

        rcu_read_unlock();

        /*
         * It's possible to exit the loop having never used the last sp if, for
         * example, a vCPU doing HugePage NX splitting wins the race and
         * installs its own sp in place of the last sp we tried to split.
         */
        if (sp)
                tdp_mmu_free_sp(sp);

        return ret;
}


/*
 * Try to split all huge pages mapped by the TDP MMU down to the target level.
 */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
                                      const struct kvm_memory_slot *slot,
                                      gfn_t start, gfn_t end,
                                      int target_level, bool shared)
{
        struct kvm_mmu_page *root;
        int r = 0;

        kvm_lockdep_assert_mmu_lock_held(kvm, shared);

        for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
                r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
                if (r) {
                        kvm_tdp_mmu_put_root(kvm, root, shared);
                        break;
                }
        }
}

static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
{
        /*
         * All TDP MMU shadow pages share the same role as their root, aside
         * from level, so it is valid to key off any shadow page to determine if
         * write protection is needed for an entire tree.
         */
        return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t start, gfn_t end)
{
        const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
                                                            shadow_dirty_mask;
        struct tdp_iter iter;
        bool spte_set = false;

        rcu_read_lock();

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;

                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
                                spte_ad_need_write_protect(iter.old_spte));

                if (!(iter.old_spte & dbit))
                        continue;

                if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
                        goto retry;

                spte_set = true;
        }

        rcu_read_unlock();
        return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
                                  const struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        bool spte_set = false;

        lockdep_assert_held_read(&kvm->mmu_lock);

        for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                                  slot->base_gfn + slot->npages);

        return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
{
        const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
                                                                        shadow_dirty_mask;
        struct tdp_iter iter;

        lockdep_assert_held_write(&kvm->mmu_lock);

        rcu_read_lock();

1600 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1601 gfn + BITS_PER_LONG) {
1602 if (!mask)
1603 break;
1604
1605 KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1606 spte_ad_need_write_protect(iter.old_spte));
1607
1608 if (iter.level > PG_LEVEL_4K ||
1609 !(mask & (1UL << (iter.gfn - gfn))))
1610 continue;
1611
1612 mask &= ~(1UL << (iter.gfn - gfn));
1613
1614 if (!(iter.old_spte & dbit))
1615 continue;
1616
1617 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1618 iter.old_spte, dbit,
1619 iter.level);
1620
1621 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1622 iter.old_spte,
1623 iter.old_spte & ~dbit);
1624 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1625 }
1626
1627 rcu_read_unlock();
1628 }
1629
/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

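/*
 * Illustrative sketch, not part of the upstream file: the mask semantics
 * of kvm_tdp_mmu_clear_dirty_pt_masked().  Bit i of @mask selects gfn + i,
 * so a 64-bit mask covers one BITS_PER_LONG-aligned chunk of a slot's
 * dirty bitmap; e.g. gfn == 0x1000 and mask == 0xa clears GFNs 0x1001 and
 * 0x1003.  The chunk-based wrapper below is hypothetical.
 */
static void example_clear_dirty_chunk(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long chunk_idx,
				      unsigned long mask, bool wrprot)
{
	gfn_t gfn = slot->base_gfn + chunk_idx * BITS_PER_LONG;

	/* Must hold mmu_lock for write, see clear_dirty_pt_masked(). */
	if (mask)
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, gfn, mask, wrprot);
}
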
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	int max_mapping_level;

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
		    !is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
		 * a large page size, then its parent would have been zapped
		 * instead of stepping down.
		 */
		if (is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * If iter.gfn resides outside of the slot, i.e. the page for
		 * the current level overlaps but is not contained by the slot,
		 * then the SPTE can't be made huge. More importantly, trying
		 * to query that info from slot->arch.lpage_info will cause an
		 * out-of-bounds access.
		 */
		if (iter.gfn < start || iter.gfn >= end)
			continue;

		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
							      iter.gfn, PG_LEVEL_NUM);
		if (max_mapping_level < iter.level)
			continue;

		/* Note, a successful atomic zap also does a remote TLB flush. */
		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Zap non-leaf SPTEs (and free their associated page tables) which could
 * be replaced by huge pages, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		zap_collapsible_spte_range(kvm, root, slot);
}

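/*
 * Illustrative sketch, not part of the upstream file: a minimal caller of
 * kvm_tdp_mmu_zap_collapsible_sptes(), as would run after dirty logging is
 * disabled so that subsequent faults can reinstall huge mappings.  The
 * wrapper name is hypothetical; the real caller is in mmu.c.
 */
static void example_recover_huge_pages(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	read_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
	read_unlock(&kvm->mmu_lock);
}
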
/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

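/*
 * Illustrative sketch, not part of the upstream file: write-protecting a
 * single GFN at 4KiB granularity and flushing only when needed, in the
 * spirit of kvm_vcpu_write_protect_gfn() in mmu.c.  Unlike the clear-dirty
 * paths above, this requires mmu_lock held for write.
 */
static void example_write_protect_and_flush(struct kvm *kvm,
					    struct kvm_memory_slot *slot,
					    gfn_t gfn)
{
	bool flush;

	write_lock(&kvm->mmu_lock);
	flush = kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
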
/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->root_role.level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}

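/*
 * Illustrative sketch, not part of the upstream file: consuming
 * kvm_tdp_mmu_get_walk() in the style of get_mmio_spte() in mmu.c.  The
 * sptes array is indexed by level (levels are 1-based), and entries below
 * the returned leaf level are left unwritten.  The function name is
 * hypothetical.
 */
static void example_dump_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	kvm_tdp_mmu_walk_lockless_begin();
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();

	/* leaf == -1 means no SPTE was visited for this address. */
	if (leaf < 0)
		return;

	for (level = root_level; level >= leaf; level--)
		pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
}
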
/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}
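
/*
 * Illustrative sketch, not part of the upstream file: the intended shape
 * of a kvm_tdp_mmu_fast_pf_get_last_sptep() caller.  Upstream only calls
 * it from fast_page_fault(); this stripped-down reader exists purely to
 * show the lockless-walk contract around the returned pointer.
 */
static u64 example_peek_last_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 spte = 0;
	u64 *sptep;

	kvm_tdp_mmu_walk_lockless_begin();
	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
	/* sptep must not be dereferenced after walk_lockless_end(). */
	kvm_tdp_mmu_walk_lockless_end();

	return sptep ? spte : 0;
}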