// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

/* Initializes the TDP MMU for the VM, if enabled. */
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
	if (!wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	kvm->arch.tdp_mmu_zap_wq = wq;
	return 1;
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	/* Also waits for any queued work items. */
	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);

	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down. Work items on tdp_mmu_zap_wq
	 * can call kvm_tdp_mmu_put_root and create new callbacks.
	 */
	rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared);

static void tdp_mmu_zap_root_work(struct work_struct *work)
{
	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
						 tdp_mmu_async_work);
	struct kvm *kvm = root->tdp_mmu_async_data;

	read_lock(&kvm->mmu_lock);

	/*
	 * A TLB flush is not necessary as KVM performs a local TLB flush when
	 * allocating a new root (see kvm_mmu_load()), and when migrating a
	 * vCPU to a different pCPU. Note, the local TLB flush on reuse also
	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
	 * intermediate paging structures, that may be zapped, as such entries
	 * are associated with the ASID on both VMX and SVM.
	 */
	tdp_mmu_zap_root(kvm, root, true);

	/*
	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
	 * avoiding an infinite loop. By design, the root is reachable while
	 * it's being asynchronously zapped, thus a different task can put its
	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
	 * asynchronously zapped root is unavoidable.
	 */
	kvm_tdp_mmu_put_root(kvm, root, true);

	read_unlock(&kvm->mmu_lock);
}
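
/*
 * Illustrative sketch (comment only, not compiled code) of the asynchronous
 * zap flow implemented by the worker above together with
 * tdp_mmu_schedule_zap_root() below:
 *
 *	tdp_mmu_schedule_zap_root(kvm, root);	// queue on tdp_mmu_zap_wq
 *	...					// later, in the worker:
 *	read_lock(&kvm->mmu_lock);
 *	tdp_mmu_zap_root(kvm, root, true);	// may yield; root stays reachable
 *	kvm_tdp_mmu_put_root(kvm, root, true);	// drop the ref gifted to the worker
 *	read_unlock(&kvm->mmu_lock);
 */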

static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	root->tdp_mmu_async_data = kvm;
	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}

static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
{
	union kvm_mmu_page_role role = page->role;

	role.invalid = true;

	/* No need to use cmpxchg, only the invalid bit can change. */
	role.word = xchg(&page->role.word, role.word);
	return role.invalid;
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!is_tdp_mmu_page(root));

	/*
	 * The root now has refcount=0. It is valid, but readers already
	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
	 * rejects it. This remains true for the rest of the execution
	 * of this function, because readers visit valid roots only
	 * (except for tdp_mmu_zap_root_work(), which however
	 * does not acquire any reference itself).
	 *
	 * Even though there are flows that need to visit all roots for
	 * correctness, they all take mmu_lock for write, so they cannot yet
	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
	 * since the root still has refcount=0.
	 *
	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
	 * So the root temporarily gets an extra reference, going to refcount=1
	 * while staying invalid. Readers still cannot acquire any reference;
	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
	 * they might take an extra reference if they themselves yield.
	 * Therefore, when the reference is given back by the worker,
	 * there is no guarantee that the refcount is still 1. If not, whoever
	 * puts the last reference will free the page, but they will not have
	 * to zap the root because a root cannot go from invalid to valid.
	 */
	if (!kvm_tdp_root_mark_invalid(root)) {
		refcount_set(&root->tdp_mmu_root_count, 1);

		/*
		 * Zapping the root in a worker is not just "nice to have";
		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
		 * skips already-invalid roots. If kvm_tdp_mmu_put_root() did
		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
		 * might return with some roots not zapped yet.
		 */
		tdp_mmu_schedule_zap_root(kvm, root);
		return;
	}

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
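
/*
 * Informal summary of the root lifecycle implemented above (derived from
 * kvm_tdp_mmu_put_root() and kvm_tdp_root_mark_invalid(), for orientation
 * only):
 *
 *   valid,   refcount > 0:  live; readers may take references
 *   valid,   refcount = 0:  last ref dropped; put_root() marks the root
 *                           invalid, re-elevates refcount to 1 and queues
 *                           the zap worker
 *   invalid, refcount > 0:  being zapped asynchronously; unreferenceable by
 *                           readers, still visible to write-lock holders
 *   invalid, refcount = 0:  unlinked from tdp_mmu_roots and freed via RCU
 */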

/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL). A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
		    kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
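
/*
 * Illustrative sketch (comment only) of a typical user of the yield-safe
 * iterator, modeled on kvm_tdp_mmu_wrprot_slot() below. Breaking out of the
 * loop early would require an explicit kvm_tdp_mmu_put_root() on the current
 * root, per the note above:
 *
 *	lockdep_assert_held_read(&kvm->mmu_lock);
 *	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id, true)
 *		changed |= do_something(kvm, root);	// hypothetical helper
 */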

/*
 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
		    kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * Check for an existing root before allocating a new one. Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, +1);
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared)
{
	tdp_unaccount_mmu_page(kvm, sp);

	if (!sp->nx_huge_page_disallowed)
		return;

	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp, shared);

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		tdp_ptep_t sptep = pt + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_spte = kvm_tdp_mmu_read_spte(sptep);
			if (!is_shadow_present_pte(old_spte))
				continue;

			/*
			 * Use the common helper instead of a raw WRITE_ONCE as
			 * the SPTE needs to be updated atomically if it can be
			 * modified by a different vCPU outside of mmu_lock.
			 * Even though the parent SPTE is !PRESENT, the TLB
			 * hasn't yet been flushed, and both Intel and AMD
			 * document that A/D assists can use upper-level PxE
			 * entries that are cached in the TLB, i.e. the CPU can
			 * still access the page and mark it dirty.
			 *
			 * No retry is needed in the atomic update path as the
			 * sole concern is dropping a Dirty bit, i.e. no other
			 * task can zap/remove the SPTE as mmu_lock is held for
			 * write. Marking the SPTE as a removed SPTE is not
			 * strictly necessary for the same reason, but using
			 * the removed SPTE value keeps the shared/exclusive
			 * paths consistent and allows the handle_changed_spte()
			 * call below to hardcode the new value to REMOVED_SPTE.
			 *
			 * Note, even though dropping a Dirty bit is the only
			 * scenario where a non-atomic update could result in a
			 * functional bug, simply checking the Dirty bit isn't
			 * sufficient as a fast page fault could read the upper
			 * level SPTE before it is zapped, and then make this
			 * target SPTE writable, resume the guest, and set the
			 * Dirty bit between reading the SPTE above and writing
			 * it here.
			 */
			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
							  REMOVED_SPTE, level);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_spte, REMOVED_SPTE, level, shared);
	}

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE. Note,
 * dirty logging updates are handled in common code, not here (see make_spte()
 * and fast_pf_fix_direct_spte()).
 */
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure. Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);

	if (was_leaf && is_accessed_spte(old_spte) &&
	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}
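
/*
 * Informal summary of the transitions handled above (derived from the code,
 * for orientation only):
 *
 *   old -> new                      action
 *   ---------------------------------------------------------------------
 *   !present -> !present            legal only for MMIO/removed SPTEs; no-op
 *   leaf     -> leaf, same PFN      A/D bit bookkeeping only
 *   leaf     -> leaf, new PFN       BUG(); callers must zap first
 *   non-leaf -> leaf or !present    recurse into handle_removed_pt()
 *   Dirty/Accessed bit cleared      propagated to the backing page via
 *                                   kvm_set_pfn_dirty()/kvm_set_pfn_accessed()
 */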

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping. Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter,
					  u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);

	/*
	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
	 * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
		return -EBUSY;

	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			    new_spte, iter->level, true);

	return 0;
}
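
/*
 * Illustrative sketch (comment only) of the retry idiom used throughout this
 * file with tdp_mmu_set_spte_atomic(). Because a failed cmpxchg refreshes
 * iter.old_spte to the current value, the caller can simply re-evaluate its
 * checks and retry the same entry:
 *
 *	for_each_tdp_pte(iter, root, start, end) {
 *	retry:
 *		if (!is_shadow_present_pte(iter.old_spte))
 *			continue;
 *		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *			goto retry;
 *	}
 */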

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter)
{
	int ret;

	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
	if (ret)
		return ret;

	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);

	/*
	 * No other thread can overwrite the removed SPTE as they must either
	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
	 * overwrite the special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present to non-present. Use
	 * the raw write helper to avoid an unnecessary check on volatile bits.
	 */
	__kvm_tdp_mmu_write_spte(iter->sptep, 0);

	return 0;
}


/*
 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: KVM instance
 * @as_id: Address space ID, i.e. regular vs. SMM
 * @sptep: Pointer to the SPTE
 * @old_spte: The current value of the SPTE
 * @new_spte: The new value that will be set for the SPTE
 * @gfn: The base GFN that was (or will be) mapped by the SPTE
 * @level: The level _containing_ the SPTE (its parent PT's level)
 *
 * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits.
 */
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));

	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
	return old_spte;
}

static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
					 u64 new_spte)
{
	WARN_ON_ONCE(iter->yielded);
	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
					  iter->old_spte, new_spte,
					  iter->gfn, iter->level);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
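
/*
 * Orientation note (informal): tdp_root_for_each_pte() walks every SPTE of
 * one root in [start, end); tdp_root_for_each_leaf_pte() additionally skips
 * non-present and non-leaf entries; tdp_mmu_for_each_pte() walks from the
 * vCPU's current root (_mmu->root.hpa) rather than an explicitly named root.
 */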

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		rcu_read_unlock();

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}

static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
	 * a gpa range that would exceed the max gfn, and KVM does not create
	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
	 * the slow emulation path every time.
	 */
	return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			       bool shared, int zap_level)
{
	struct tdp_iter iter;

	gfn_t end = tdp_mmu_max_gfn_exclusive();
	gfn_t start = 0;

	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_iter_set_spte(kvm, &iter, 0);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
			goto retry;
	}
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{

	/*
	 * The root must have an elevated refcount so that it's reachable via
	 * mmu_notifier callbacks, which allows this path to yield and drop
	 * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
	 * must drop all references to relevant pages prior to completing the
	 * callback. Dropping mmu_lock with an unreachable root would result
	 * in zapping SPTEs after a relevant mmu_notifier callback completes
	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
	 * dirty accessed bits to the SPTE's associated struct page.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
	 * split the zap into two passes. On the first pass, zap at the 1gb
	 * level, and then zap top-level SPs on the second pass. "1gb" is not
	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
	 *
	 * Because zapping a SP recurses on its children, stepping down to
	 * PG_LEVEL_4K in the iterator itself is unnecessary.
	 */
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * This helper intentionally doesn't allow zapping a root shadow page,
	 * which doesn't have a parent page table and thus no associated entry.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
		return false;

	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
			 sp->gfn, sp->role.level + 1);

	return true;
}

/*
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	end = min(end, tdp_mmu_max_gfn_exclusive());

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_iter_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();

	/*
	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
	 */
	return flush;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
 * more SPTEs were zapped since the MMU lock was last acquired.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
			   bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *root;
	int i;

	/*
	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
	 * before returning to the caller. Zap directly even if the root is
	 * also being zapped by a worker. Walking zapped top-level SPTEs isn't
	 * all that expensive and mmu_lock is already held, which means the
	 * worker has yielded, i.e. flushing the work instead of zapping here
	 * isn't guaranteed to be any faster.
	 *
	 * A TLB flush is unnecessary, KVM zaps everything if and only if the
	 * VM is being destroyed or the userspace VMM has exited. In both
	 * cases, KVM_RUN is unreachable, i.e. no vCPUs will ever service the
	 * request.
	 */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
			tdp_mmu_zap_root(kvm, root, false);
	}
}

/*
 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
 * zap" completes.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
}

/*
 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
 * is about to be zapped, e.g. in response to a memslots update. The actual
 * zapping is performed asynchronously, so a reference is taken on all roots.
 * Using a separate workqueue makes it easy to ensure that the destruction is
 * performed before the "fast zap" completes, without keeping a separate list
 * of invalidated roots; the list is effectively the list of work items in
 * the workqueue.
 *
 * Get a reference even if the root is already invalid, the asynchronous worker
 * assumes it was gifted a reference to the root it processes. Because mmu_lock
 * is held for write, it should be impossible to observe a root with zero
 * refcount, i.e. the list of roots cannot be stale.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (!root->role.invalid &&
		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
			root->role.invalid = true;
			tdp_mmu_schedule_zap_root(kvm, root);
		}
	}
}
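
/*
 * Illustrative sketch (comment only) of the "fast zap" sequence as seen from
 * a caller such as the memslots-update path in mmu.c:
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);	// mark roots + queue workers
 *	write_unlock(&kvm->mmu_lock);
 *	...
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);	// flush_workqueue(), i.e. wait
 *						// for all queued zaps to finish
 */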

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					   struct kvm_page_fault *fault,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
	u64 new_spte;
	int ret = RET_PF_FIXED;
	bool wrprot = false;

	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
		return RET_PF_RETRY;

	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
				   fault->pfn, iter->old_spte, fault->prefetch,
				   true, fault->map_writable, &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;
	else if (is_shadow_present_pte(iter->old_spte) &&
		 !is_last_spte(iter->old_spte, iter->level))
		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (wrprot && fault->write)
		ret = RET_PF_EMULATE;

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		vcpu->stat.pf_mmio_spte_created++;
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	return ret;
}

/*
 * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
 * provided page table.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @sp: The new TDP page table to install.
 * @shared: This operation is running under the MMU lock in read mode.
 *
 * Returns: 0 if the new page table was installed. Non-0 if the page table
 *          could not be installed (e.g. the atomic compare-exchange failed).
 */
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
			   struct kvm_mmu_page *sp, bool shared)
{
	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
	int ret = 0;

	if (shared) {
		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
		if (ret)
			return ret;
	} else {
		tdp_mmu_iter_set_spte(kvm, iter, spte);
	}

	tdp_account_mmu_page(kvm, sp);

	return 0;
}

static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared);

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct kvm *kvm = vcpu->kvm;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_RETRY;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		int r;

		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		/*
		 * If SPTE has been frozen by another thread, just give up and
		 * retry, avoiding unnecessary page table allocation and free.
		 */
		if (is_removed_spte(iter.old_spte))
			goto retry;

		if (iter.level == fault->goal_level)
			goto map_target_level;

		/* Step down into the lower level page table if it exists. */
		if (is_shadow_present_pte(iter.old_spte) &&
		    !is_large_pte(iter.old_spte))
			continue;

		/*
		 * The SPTE is either non-present or points to a huge page that
		 * needs to be split.
		 */
		sp = tdp_mmu_alloc_sp(vcpu);
		tdp_mmu_init_child_sp(sp, &iter);

		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;

		if (is_shadow_present_pte(iter.old_spte))
			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
		else
			r = tdp_mmu_link_sp(kvm, &iter, sp, true);

		/*
		 * Force the guest to retry if installing an upper level SPTE
		 * failed, e.g. because a different task modified the SPTE.
		 */
		if (r) {
			tdp_mmu_free_sp(sp);
			goto retry;
		}

		if (fault->huge_page_disallowed &&
		    fault->req_level >= iter.level) {
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
			if (sp->nx_huge_page_disallowed)
				track_possible_nx_huge_page(kvm, sp);
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
		}
	}

	/*
	 * The walk aborted before reaching the target level, e.g. because the
	 * iterator detected an upper level SPTE was frozen during traversal.
	 */
	WARN_ON_ONCE(iter.level == fault->goal_level);
	goto retry;

map_target_level:
	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

retry:
	rcu_read_unlock();
	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
				     range->end, range->may_block, flush);
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		rcu_read_lock();

		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);

		rcu_read_unlock();
	}

	return ret;
}
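
/*
 * Illustrative sketch (comment only) of a minimal tdp_handler_t; the real
 * instances are age_gfn_range(), test_age_gfn() and set_spte_gfn() below.
 * A handler observes one leaf SPTE at a time and reports whether anything
 * changed or matched:
 *
 *	static bool is_writable_gfn(struct kvm *kvm, struct tdp_iter *iter,
 *				    struct kvm_gfn_range *range)
 *	{
 *		return is_writable_pte(iter->old_spte);	// hypothetical handler
 *	}
 */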

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 *
 * No need to mark the corresponding PFN as accessed as this call is coming
 * from the clear_young() or clear_flush_young() notifier, which uses the
 * return value to determine if the page has been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	if (spte_ad_enabled(iter->old_spte)) {
		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
							 iter->old_spte,
							 shadow_accessed_mask,
							 iter->level);
		new_spte = iter->old_spte & ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(iter->old_spte))
			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));

		new_spte = mark_spte_for_access_track(iter->old_spte);
		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
							iter->old_spte, new_spte,
							iter->level);
	}

	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
				       iter->old_spte, new_spte);
	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See handle_changed_spte().
	 */
	tdp_mmu_iter_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/*
	 * No need to handle the remote TLB flush under RCU protection, the
	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
	 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
	 */
	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
					     slot->base_gfn + slot->npages,
					     min_level);

	return spte_set;
}

static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
{
	struct kvm_mmu_page *sp;

	gfp |= __GFP_ZERO;

	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
	if (!sp)
		return NULL;

	sp->spt = (void *)__get_free_page(gfp);
	if (!sp->spt) {
		kmem_cache_free(mmu_page_header_cache, sp);
		return NULL;
	}

	return sp;
}

static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
						       struct tdp_iter *iter,
						       bool shared)
{
	struct kvm_mmu_page *sp;

	/*
	 * Since we are allocating while under the MMU lock we have to be
	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
	 * reclaim and to avoid making any filesystem callbacks (which can end
	 * up invoking KVM MMU notifiers, resulting in a deadlock).
	 *
	 * If this allocation fails we drop the lock and retry with reclaim
	 * allowed.
	 */
	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
	if (sp)
		return sp;

	rcu_read_unlock();

	if (shared)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);

	iter->yielded = true;
	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);

	if (shared)
		read_lock(&kvm->mmu_lock);
	else
		write_lock(&kvm->mmu_lock);

	rcu_read_lock();

	return sp;
}
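
/*
 * Note on the pattern above (informal): because tdp_mmu_alloc_sp_for_split()
 * may drop mmu_lock to retry the allocation with reclaim allowed, it reports
 * the drop by setting iter->yielded, exactly like tdp_mmu_iter_cond_resched().
 * Callers must treat a successful allocation that yielded as a restart of the
 * current traversal step; see the "if (iter.yielded) continue;" in
 * tdp_mmu_split_huge_pages_root() below.
 */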

/* Note, the caller is responsible for initializing @sp. */
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared)
{
	const u64 huge_spte = iter->old_spte;
	const int level = iter->level;
	int ret, i;

	/*
	 * No need for atomics when writing to sp->spt since the page table has
	 * not been linked in yet and thus is not reachable from any other CPU.
	 */
	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);

	/*
	 * Replace the huge spte with a pointer to the populated lower level
	 * page table. Since we are making this change without a TLB flush vCPUs
	 * will see a mix of the split mappings and the original huge mapping,
	 * depending on what's currently in their TLB. This is fine from a
	 * correctness standpoint since the translation will be the same either
	 * way.
	 */
	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
	if (ret)
		goto out;

	/*
	 * tdp_mmu_link_sp() will handle subtracting the huge page we are
	 * overwriting from the page stats. But we have to manually update
	 * the page stats with the new present child pages.
	 */
	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);

out:
	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
	return ret;
}

static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
					 struct kvm_mmu_page *root,
					 gfn_t start, gfn_t end,
					 int target_level, bool shared)
{
	struct kvm_mmu_page *sp = NULL;
	struct tdp_iter iter;
	int ret = 0;

	rcu_read_lock();

	/*
	 * Traverse the page table splitting all huge pages above the target
	 * level into one lower level. For example, if we encounter a 1GB page
	 * we split it into 512 2MB pages.
	 *
	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
	 * to visit an SPTE before ever visiting its children, which means we
	 * will correctly recursively split huge pages that are more than one
	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
	 * and then splitting each of those to 512 4KB pages).
	 */
	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
			continue;

		if (!sp) {
			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
			if (!sp) {
				ret = -ENOMEM;
				trace_kvm_mmu_split_huge_page(iter.gfn,
							      iter.old_spte,
							      iter.level, ret);
				break;
			}

			if (iter.yielded)
				continue;
		}

		tdp_mmu_init_child_sp(sp, &iter);

		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
			goto retry;

		sp = NULL;
	}

	rcu_read_unlock();

	/*
	 * It's possible to exit the loop having never used the last sp if, for
	 * example, a vCPU doing HugePage NX splitting wins the race and
	 * installs its own sp in place of the last sp we tried to split.
	 */
	if (sp)
		tdp_mmu_free_sp(sp);

	return ret;
}


/*
 * Try to split all huge pages mapped by the TDP MMU down to the target level.
 */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared)
{
	struct kvm_mmu_page *root;
	int r = 0;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
		if (r) {
			kvm_tdp_mmu_put_root(kvm, root, shared);
			break;
		}
	}
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
	struct tdp_iter iter;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		MMU_WARN_ON(kvm_ad_enabled() &&
			    spte_ad_need_write_protect(iter.old_spte));

		if (!(iter.old_spte & dbit))
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
						  slot->base_gfn + slot->npages);

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
						   shadow_dirty_mask;
	struct tdp_iter iter;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		MMU_WARN_ON(kvm_ad_enabled() &&
			    spte_ad_need_write_protect(iter.old_spte));

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (!(iter.old_spte & dbit))
			continue;

		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
							iter.old_spte, dbit,
							iter.level);

		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
					       iter.old_spte,
					       iter.old_spte & ~dbit);
		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	int max_mapping_level;

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
		    !is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with
		 * a large page size, then its parent would have been zapped
		 * instead of stepping down.
		 */
		if (is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * If iter.gfn resides outside of the slot, i.e. the page for
		 * the current level overlaps but is not contained by the slot,
		 * then the SPTE can't be made huge. More importantly, trying
		 * to query that info from slot->arch.lpage_info will cause an
		 * out-of-bounds access.
		 */
		if (iter.gfn < start || iter.gfn >= end)
			continue;

		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
							      iter.gfn, PG_LEVEL_NUM);
		if (max_mapping_level < iter.level)
			continue;

		/* Note, a successful atomic zap also does a remote TLB flush. */
		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Zap non-leaf SPTEs (and free their associated page tables) which could
 * be replaced by huge pages, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		zap_collapsible_spte_range(kvm, root, slot);
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->root_role.level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}
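
/*
 * Illustrative sketch (comment only) of the lockless-walk contract, modeled
 * on the MMIO-SPTE lookup in mmu.c:
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 *	kvm_tdp_mmu_walk_lockless_end();
 *	// on success, sptes[leaf..root] hold the SPTE values that were walked
 */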

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}