// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							      bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * Because TDP MMU page table memory is only accessed inside RCU read-side
 * critical sections, and is only freed after an RCU grace period, lockless
 * walkers are guaranteed never to touch the memory after it has been freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after root (or the first valid root if root
 * is NULL), takes a reference on it, and returns that next root. If root
 * is not NULL, this thread should have already taken a reference on it, and
 * that reference will be dropped. If no valid root is found, this
 * function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else
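
/*
 * Illustrative only: a sketch of how the yield-safe iterator above is
 * expected to be used.  The "stop_early" condition is a placeholder, not an
 * existing helper.  Per the comment above, breaking out of the loop early
 * leaves the caller holding a reference on the current root, which must be
 * dropped with kvm_tdp_mmu_put_root():
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared) {
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush,
 *				      shared);
 *
 *		if (stop_early) {
 *			kvm_tdp_mmu_put_root(kvm, root, shared);
 *			break;
 *		}
 *	}
 */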

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * keep setting the SPTE until the value read back
			 * is something other than the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping, but do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 * this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

/*
 * tdp_mmu_map_set_spte_atomic - Set a leaf TDP MMU SPTE atomically to resolve a
 * TDP page fault.
 *
 * @vcpu: The vcpu instance that took the TDP page fault.
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 *
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 * this function will have no side-effects.
 */
static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
					       struct tdp_iter *iter,
					       u64 new_spte)
{
	struct kvm *kvm = vcpu->kvm;

	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	/*
	 * Use kvm_vcpu_gfn_to_memslot() instead of going through
	 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
	 */
	if (is_writable_pte(new_spte)) {
		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);

		if (slot && kvm_slot_dirty_track_enabled(slot)) {
			/* Enforced by kvm_mmu_hugepage_adjust. */
			WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
			mark_page_dirty_in_slot(kvm, slot, iter->gfn);
		}
	}

	return true;
}

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}


/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}
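
/*
 * Illustrative only: a rough sketch of how the helpers above are intended to
 * be paired with the MMU lock mode.  The surrounding iterator walk and the
 * "retry" label are abbreviated placeholders, not real code in this file:
 *
 *	Holding mmu_lock for write:
 *		tdp_mmu_set_spte(kvm, &iter, new_spte);
 *
 *	Holding mmu_lock for read:
 *	retry:
 *		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, new_spte)) {
 *			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 *			goto retry;
 *		}
 *
 * The atomic variant fails if another thread changed the SPTE first, in
 * which case the caller re-reads the SPTE and retries (or bails out).
 */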

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_all_roots().
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}
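
/*
 * Illustrative only: a sketch of how invalidation and the deferred zap above
 * are expected to be paired by a caller such as kvm_mmu_zap_all_fast(); the
 * caller's other work is elided here:
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);
 *	write_unlock(&kvm->mmu_lock);
 *	...
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);
 *	read_unlock(&kvm->mmu_lock);
 */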

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					   int map_writable,
					   struct tdp_iter *iter,
					   kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = RET_PF_FIXED;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					  pfn, iter->old_spte, prefault, true,
					  map_writable, !shadow_accessed_mask,
					  &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_map_set_spte_atomic(vcpu, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	int level;
	int req_level;

	level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, fault->gfn,
						   iter.level, &fault->pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If the SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and freeing.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter, new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, fault->write, fault->map_writable, &iter,
					      fault->pfn, fault->prefault);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
		flush |= zap_gfn_range(kvm, root, range->start, range->end,
				       range->may_block, flush, false);

	return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
1102063afacdSBen Gardon */ 11033039bcc7SSean Christopherson for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { 11043039bcc7SSean Christopherson tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) 11053039bcc7SSean Christopherson ret |= handler(kvm, &iter, range); 11063039bcc7SSean Christopherson } 1107063afacdSBen Gardon 11083039bcc7SSean Christopherson rcu_read_unlock(); 1109063afacdSBen Gardon 1110063afacdSBen Gardon return ret; 1111063afacdSBen Gardon } 1112063afacdSBen Gardon 1113f8e14497SBen Gardon /* 1114f8e14497SBen Gardon * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero 1115f8e14497SBen Gardon * if any of the GFNs in the range have been accessed. 1116f8e14497SBen Gardon */ 11173039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, 11183039bcc7SSean Christopherson struct kvm_gfn_range *range) 1119f8e14497SBen Gardon { 1120f8e14497SBen Gardon u64 new_spte = 0; 1121f8e14497SBen Gardon 11223039bcc7SSean Christopherson /* If we have a non-accessed entry we don't need to change the pte. */ 11233039bcc7SSean Christopherson if (!is_accessed_spte(iter->old_spte)) 11243039bcc7SSean Christopherson return false; 11257cca2d0bSBen Gardon 11263039bcc7SSean Christopherson new_spte = iter->old_spte; 1127f8e14497SBen Gardon 1128f8e14497SBen Gardon if (spte_ad_enabled(new_spte)) { 11298f8f52a4SSean Christopherson new_spte &= ~shadow_accessed_mask; 1130f8e14497SBen Gardon } else { 1131f8e14497SBen Gardon /* 1132f8e14497SBen Gardon * Capture the dirty status of the page, so that it doesn't get 1133f8e14497SBen Gardon * lost when the SPTE is marked for access tracking. 1134f8e14497SBen Gardon */ 1135f8e14497SBen Gardon if (is_writable_pte(new_spte)) 1136f8e14497SBen Gardon kvm_set_pfn_dirty(spte_to_pfn(new_spte)); 1137f8e14497SBen Gardon 1138f8e14497SBen Gardon new_spte = mark_spte_for_access_track(new_spte); 1139f8e14497SBen Gardon } 1140f8e14497SBen Gardon 11413039bcc7SSean Christopherson tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte); 114233dd3574SBen Gardon 11433039bcc7SSean Christopherson return true; 1144f8e14497SBen Gardon } 1145f8e14497SBen Gardon 11463039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) 1147f8e14497SBen Gardon { 11483039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); 1149f8e14497SBen Gardon } 1150f8e14497SBen Gardon 11513039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, 11523039bcc7SSean Christopherson struct kvm_gfn_range *range) 1153f8e14497SBen Gardon { 11543039bcc7SSean Christopherson return is_accessed_spte(iter->old_spte); 1155f8e14497SBen Gardon } 1156f8e14497SBen Gardon 11573039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 1158f8e14497SBen Gardon { 11593039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); 11603039bcc7SSean Christopherson } 11613039bcc7SSean Christopherson 11623039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, 11633039bcc7SSean Christopherson struct kvm_gfn_range *range) 11643039bcc7SSean Christopherson { 11653039bcc7SSean Christopherson u64 new_spte; 11663039bcc7SSean Christopherson 11673039bcc7SSean Christopherson /* Huge pages aren't expected to be modified without first being zapped. 
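 * The change_pte notifier fires for a single host PTE, so the range
 * passed in is expected to cover exactly one GFN; the WARN_ON() below
 * checks that, and that the new mapping is not a huge page.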
*/ 11683039bcc7SSean Christopherson WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end); 11693039bcc7SSean Christopherson 11703039bcc7SSean Christopherson if (iter->level != PG_LEVEL_4K || 11713039bcc7SSean Christopherson !is_shadow_present_pte(iter->old_spte)) 11723039bcc7SSean Christopherson return false; 11733039bcc7SSean Christopherson 11743039bcc7SSean Christopherson /* 11753039bcc7SSean Christopherson * Note, when changing a read-only SPTE, it's not strictly necessary to 11763039bcc7SSean Christopherson * zero the SPTE before setting the new PFN, but doing so preserves the 11773039bcc7SSean Christopherson * invariant that the PFN of a present leaf SPTE can never change. 11783039bcc7SSean Christopherson * See __handle_changed_spte(). 11793039bcc7SSean Christopherson */ 11803039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, 0); 11813039bcc7SSean Christopherson 11823039bcc7SSean Christopherson if (!pte_write(range->pte)) { 11833039bcc7SSean Christopherson new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, 11843039bcc7SSean Christopherson pte_pfn(range->pte)); 11853039bcc7SSean Christopherson 11863039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, new_spte); 11873039bcc7SSean Christopherson } 11883039bcc7SSean Christopherson 11893039bcc7SSean Christopherson return true; 1190f8e14497SBen Gardon } 11911d8dd6b3SBen Gardon 11921d8dd6b3SBen Gardon /* 11931d8dd6b3SBen Gardon * Handle the changed_pte MMU notifier for the TDP MMU. 11941d8dd6b3SBen Gardon * range->pte holds the new pte_t mapping the HVA specified by the MMU 11951d8dd6b3SBen Gardon * notifier. 11961d8dd6b3SBen Gardon * Returns true if a flush is needed before releasing the MMU lock. 11971d8dd6b3SBen Gardon */ 11983039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 11991d8dd6b3SBen Gardon { 12003039bcc7SSean Christopherson bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); 12011d8dd6b3SBen Gardon 12023039bcc7SSean Christopherson /* FIXME: return 'flush' instead of flushing here. */ 12033039bcc7SSean Christopherson if (flush) 12043039bcc7SSean Christopherson kvm_flush_remote_tlbs_with_address(kvm, range->start, 1); 12057cca2d0bSBen Gardon 12063039bcc7SSean Christopherson return false; 12071d8dd6b3SBen Gardon } 12081d8dd6b3SBen Gardon 1209a6a0b05dSBen Gardon /* 1210bedd9195SDavid Matlack * Remove write access from all SPTEs at or above min_level that map GFNs 1211bedd9195SDavid Matlack * [start, end). Returns true if an SPTE has been changed and the TLBs need to 1212bedd9195SDavid Matlack * be flushed.
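 *
 * For example, min_level == PG_LEVEL_4K write-protects every writable
 * leaf SPTE in the range, while min_level == PG_LEVEL_2M leaves 4K
 * mappings alone and only clears PT_WRITABLE_MASK from 2M and larger
 * leaf SPTEs.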
1213a6a0b05dSBen Gardon */ 1214a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1215a6a0b05dSBen Gardon gfn_t start, gfn_t end, int min_level) 1216a6a0b05dSBen Gardon { 1217a6a0b05dSBen Gardon struct tdp_iter iter; 1218a6a0b05dSBen Gardon u64 new_spte; 1219a6a0b05dSBen Gardon bool spte_set = false; 1220a6a0b05dSBen Gardon 12217cca2d0bSBen Gardon rcu_read_lock(); 12227cca2d0bSBen Gardon 1223a6a0b05dSBen Gardon BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 1224a6a0b05dSBen Gardon 1225a6a0b05dSBen Gardon for_each_tdp_pte_min_level(iter, root->spt, root->role.level, 1226a6a0b05dSBen Gardon min_level, start, end) { 122724ae4cfaSBen Gardon retry: 122824ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 12291af4a960SBen Gardon continue; 12301af4a960SBen Gardon 1231a6a0b05dSBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 12320f99ee2cSBen Gardon !is_last_spte(iter.old_spte, iter.level) || 12330f99ee2cSBen Gardon !(iter.old_spte & PT_WRITABLE_MASK)) 1234a6a0b05dSBen Gardon continue; 1235a6a0b05dSBen Gardon 1236a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1237a6a0b05dSBen Gardon 123824ae4cfaSBen Gardon if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, 123924ae4cfaSBen Gardon new_spte)) { 124024ae4cfaSBen Gardon /* 124124ae4cfaSBen Gardon * The iter must explicitly re-read the SPTE because 124224ae4cfaSBen Gardon * the atomic cmpxchg failed. 124324ae4cfaSBen Gardon */ 124424ae4cfaSBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 124524ae4cfaSBen Gardon goto retry; 124624ae4cfaSBen Gardon } 1247a6a0b05dSBen Gardon spte_set = true; 1248a6a0b05dSBen Gardon } 12497cca2d0bSBen Gardon 12507cca2d0bSBen Gardon rcu_read_unlock(); 1251a6a0b05dSBen Gardon return spte_set; 1252a6a0b05dSBen Gardon } 1253a6a0b05dSBen Gardon 1254a6a0b05dSBen Gardon /* 1255a6a0b05dSBen Gardon * Remove write access from all the SPTEs mapping GFNs in the memslot. Will 1256a6a0b05dSBen Gardon * only affect leaf SPTEs down to min_level. 1257a6a0b05dSBen Gardon * Returns true if an SPTE has been changed and the TLBs need to be flushed. 1258a6a0b05dSBen Gardon */ 1259269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, 1260269e9552SHamza Mahfooz const struct kvm_memory_slot *slot, int min_level) 1261a6a0b05dSBen Gardon { 1262a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1263a6a0b05dSBen Gardon bool spte_set = false; 1264a6a0b05dSBen Gardon 126524ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1266a6a0b05dSBen Gardon 126724ae4cfaSBen Gardon for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1268a6a0b05dSBen Gardon spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, 1269a6a0b05dSBen Gardon slot->base_gfn + slot->npages, min_level); 1270a6a0b05dSBen Gardon 1271a6a0b05dSBen Gardon return spte_set; 1272a6a0b05dSBen Gardon } 1273a6a0b05dSBen Gardon 1274a6a0b05dSBen Gardon /* 1275a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1276a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1277a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1278a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1279a6a0b05dSBen Gardon * be flushed. 
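 *
 * Clearing the writable bit stands in for clearing the dirty bit: the
 * next guest write faults, and the page is recorded as dirty via the
 * write-protection path instead of via a hardware-set dirty bit.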
1280a6a0b05dSBen Gardon */ 1281a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1282a6a0b05dSBen Gardon gfn_t start, gfn_t end) 1283a6a0b05dSBen Gardon { 1284a6a0b05dSBen Gardon struct tdp_iter iter; 1285a6a0b05dSBen Gardon u64 new_spte; 1286a6a0b05dSBen Gardon bool spte_set = false; 1287a6a0b05dSBen Gardon 12887cca2d0bSBen Gardon rcu_read_lock(); 12897cca2d0bSBen Gardon 1290a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, start, end) { 129124ae4cfaSBen Gardon retry: 129224ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 12931af4a960SBen Gardon continue; 12941af4a960SBen Gardon 1295a6a0b05dSBen Gardon if (spte_ad_need_write_protect(iter.old_spte)) { 1296a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1297a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1298a6a0b05dSBen Gardon else 1299a6a0b05dSBen Gardon continue; 1300a6a0b05dSBen Gardon } else { 1301a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1302a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1303a6a0b05dSBen Gardon else 1304a6a0b05dSBen Gardon continue; 1305a6a0b05dSBen Gardon } 1306a6a0b05dSBen Gardon 130724ae4cfaSBen Gardon if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter, 130824ae4cfaSBen Gardon new_spte)) { 130924ae4cfaSBen Gardon /* 131024ae4cfaSBen Gardon * The iter must explicitly re-read the SPTE because 131124ae4cfaSBen Gardon * the atomic cmpxchg failed. 131224ae4cfaSBen Gardon */ 131324ae4cfaSBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 131424ae4cfaSBen Gardon goto retry; 131524ae4cfaSBen Gardon } 1316a6a0b05dSBen Gardon spte_set = true; 1317a6a0b05dSBen Gardon } 13187cca2d0bSBen Gardon 13197cca2d0bSBen Gardon rcu_read_unlock(); 1320a6a0b05dSBen Gardon return spte_set; 1321a6a0b05dSBen Gardon } 1322a6a0b05dSBen Gardon 1323a6a0b05dSBen Gardon /* 1324a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1325a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1326a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1327a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1328a6a0b05dSBen Gardon * be flushed. 1329a6a0b05dSBen Gardon */ 1330269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, 1331269e9552SHamza Mahfooz const struct kvm_memory_slot *slot) 1332a6a0b05dSBen Gardon { 1333a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1334a6a0b05dSBen Gardon bool spte_set = false; 1335a6a0b05dSBen Gardon 133624ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1337a6a0b05dSBen Gardon 133824ae4cfaSBen Gardon for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1339a6a0b05dSBen Gardon spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, 1340a6a0b05dSBen Gardon slot->base_gfn + slot->npages); 1341a6a0b05dSBen Gardon 1342a6a0b05dSBen Gardon return spte_set; 1343a6a0b05dSBen Gardon } 1344a6a0b05dSBen Gardon 1345a6a0b05dSBen Gardon /* 1346a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1347a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1348a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. 
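 * For example, with gfn == 0x1000 and mask == 0x5, only the SPTEs
 * mapping GFNs 0x1000 and 0x1002 are considered; each visited GFN's
 * bit is cleared from mask, and the walk below stops early once mask
 * is empty.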
If AD bits are enabled, 1349a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1350a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 1351a6a0b05dSBen Gardon */ 1352a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, 1353a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, bool wrprot) 1354a6a0b05dSBen Gardon { 1355a6a0b05dSBen Gardon struct tdp_iter iter; 1356a6a0b05dSBen Gardon u64 new_spte; 1357a6a0b05dSBen Gardon 13587cca2d0bSBen Gardon rcu_read_lock(); 13597cca2d0bSBen Gardon 1360a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), 1361a6a0b05dSBen Gardon gfn + BITS_PER_LONG) { 1362a6a0b05dSBen Gardon if (!mask) 1363a6a0b05dSBen Gardon break; 1364a6a0b05dSBen Gardon 1365a6a0b05dSBen Gardon if (iter.level > PG_LEVEL_4K || 1366a6a0b05dSBen Gardon !(mask & (1UL << (iter.gfn - gfn)))) 1367a6a0b05dSBen Gardon continue; 1368a6a0b05dSBen Gardon 1369f1b3b06aSBen Gardon mask &= ~(1UL << (iter.gfn - gfn)); 1370f1b3b06aSBen Gardon 1371a6a0b05dSBen Gardon if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { 1372a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1373a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1374a6a0b05dSBen Gardon else 1375a6a0b05dSBen Gardon continue; 1376a6a0b05dSBen Gardon } else { 1377a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1378a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1379a6a0b05dSBen Gardon else 1380a6a0b05dSBen Gardon continue; 1381a6a0b05dSBen Gardon } 1382a6a0b05dSBen Gardon 1383a6a0b05dSBen Gardon tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); 1384a6a0b05dSBen Gardon } 13857cca2d0bSBen Gardon 13867cca2d0bSBen Gardon rcu_read_unlock(); 1387a6a0b05dSBen Gardon } 1388a6a0b05dSBen Gardon 1389a6a0b05dSBen Gardon /* 1390a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1391a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1392a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1393a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1394a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 1395a6a0b05dSBen Gardon */ 1396a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, 1397a6a0b05dSBen Gardon struct kvm_memory_slot *slot, 1398a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, 1399a6a0b05dSBen Gardon bool wrprot) 1400a6a0b05dSBen Gardon { 1401a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1402a6a0b05dSBen Gardon 1403531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1404a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 1405a6a0b05dSBen Gardon clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); 1406a6a0b05dSBen Gardon } 1407a6a0b05dSBen Gardon 1408a6a0b05dSBen Gardon /* 140987aa9ec9SBen Gardon * Clear leaf entries which could be replaced by large mappings, for 141087aa9ec9SBen Gardon * GFNs within the slot. 
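 * This is typically invoked after dirty logging is disabled on the
 * slot, so that regions which were mapped with 4K SPTEs for dirty
 * tracking can be faulted back in as huge pages.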
141114881998SBen Gardon */ 1412af95b53eSSean Christopherson static bool zap_collapsible_spte_range(struct kvm *kvm, 141314881998SBen Gardon struct kvm_mmu_page *root, 14148ca6f063SBen Gardon const struct kvm_memory_slot *slot, 1415af95b53eSSean Christopherson bool flush) 141614881998SBen Gardon { 14179eba50f8SSean Christopherson gfn_t start = slot->base_gfn; 14189eba50f8SSean Christopherson gfn_t end = start + slot->npages; 141914881998SBen Gardon struct tdp_iter iter; 142014881998SBen Gardon kvm_pfn_t pfn; 142114881998SBen Gardon 14227cca2d0bSBen Gardon rcu_read_lock(); 14237cca2d0bSBen Gardon 142414881998SBen Gardon tdp_root_for_each_pte(iter, root, start, end) { 14252db6f772SBen Gardon retry: 14262db6f772SBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) { 1427af95b53eSSean Christopherson flush = false; 14281af4a960SBen Gardon continue; 14291af4a960SBen Gardon } 14301af4a960SBen Gardon 143114881998SBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 143287aa9ec9SBen Gardon !is_last_spte(iter.old_spte, iter.level)) 143314881998SBen Gardon continue; 143414881998SBen Gardon 143514881998SBen Gardon pfn = spte_to_pfn(iter.old_spte); 143614881998SBen Gardon if (kvm_is_reserved_pfn(pfn) || 14379eba50f8SSean Christopherson iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, 14389eba50f8SSean Christopherson pfn, PG_LEVEL_NUM)) 143914881998SBen Gardon continue; 144014881998SBen Gardon 14412db6f772SBen Gardon if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { 14422db6f772SBen Gardon /* 14432db6f772SBen Gardon * The iter must explicitly re-read the SPTE because 14442db6f772SBen Gardon * the atomic cmpxchg failed. 14452db6f772SBen Gardon */ 14462db6f772SBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 14472db6f772SBen Gardon goto retry; 14482db6f772SBen Gardon } 1449af95b53eSSean Christopherson flush = true; 145014881998SBen Gardon } 145114881998SBen Gardon 14527cca2d0bSBen Gardon rcu_read_unlock(); 1453af95b53eSSean Christopherson 1454af95b53eSSean Christopherson return flush; 145514881998SBen Gardon } 145614881998SBen Gardon 145714881998SBen Gardon /* 145814881998SBen Gardon * Clear non-leaf entries (and free associated page tables) which could 145914881998SBen Gardon * be replaced by large mappings, for GFNs within the slot. 146014881998SBen Gardon */ 1461142ccde1SSean Christopherson bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, 14628ca6f063SBen Gardon const struct kvm_memory_slot *slot, 14638ca6f063SBen Gardon bool flush) 146414881998SBen Gardon { 146514881998SBen Gardon struct kvm_mmu_page *root; 146614881998SBen Gardon 14672db6f772SBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 146814881998SBen Gardon 14692db6f772SBen Gardon for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1470af95b53eSSean Christopherson flush = zap_collapsible_spte_range(kvm, root, slot, flush); 1471af95b53eSSean Christopherson 1472142ccde1SSean Christopherson return flush; 147314881998SBen Gardon } 147446044f72SBen Gardon 147546044f72SBen Gardon /* 147646044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 14775fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 147846044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 
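 *
 * Both PT_WRITABLE_MASK and shadow_mmu_writable_mask are cleared, so
 * the fast page fault path cannot locklessly restore write access; a
 * later write must go through the full fault path.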
147946044f72SBen Gardon */ 148046044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, 14813ad93562SKeqian Zhu gfn_t gfn, int min_level) 148246044f72SBen Gardon { 148346044f72SBen Gardon struct tdp_iter iter; 148446044f72SBen Gardon u64 new_spte; 148546044f72SBen Gardon bool spte_set = false; 148646044f72SBen Gardon 14873ad93562SKeqian Zhu BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 14883ad93562SKeqian Zhu 14897cca2d0bSBen Gardon rcu_read_lock(); 14907cca2d0bSBen Gardon 14913ad93562SKeqian Zhu for_each_tdp_pte_min_level(iter, root->spt, root->role.level, 14923ad93562SKeqian Zhu min_level, gfn, gfn + 1) { 14933ad93562SKeqian Zhu if (!is_shadow_present_pte(iter.old_spte) || 14943ad93562SKeqian Zhu !is_last_spte(iter.old_spte, iter.level)) 14953ad93562SKeqian Zhu continue; 14963ad93562SKeqian Zhu 149746044f72SBen Gardon if (!is_writable_pte(iter.old_spte)) 149846044f72SBen Gardon break; 149946044f72SBen Gardon 150046044f72SBen Gardon new_spte = iter.old_spte & 15015fc3424fSSean Christopherson ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); 150246044f72SBen Gardon 150346044f72SBen Gardon tdp_mmu_set_spte(kvm, &iter, new_spte); 150446044f72SBen Gardon spte_set = true; 150546044f72SBen Gardon } 150646044f72SBen Gardon 15077cca2d0bSBen Gardon rcu_read_unlock(); 15087cca2d0bSBen Gardon 150946044f72SBen Gardon return spte_set; 151046044f72SBen Gardon } 151146044f72SBen Gardon 151246044f72SBen Gardon /* 151346044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 15145fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 151546044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 151646044f72SBen Gardon */ 151746044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, 15183ad93562SKeqian Zhu struct kvm_memory_slot *slot, gfn_t gfn, 15193ad93562SKeqian Zhu int min_level) 152046044f72SBen Gardon { 152146044f72SBen Gardon struct kvm_mmu_page *root; 152246044f72SBen Gardon bool spte_set = false; 152346044f72SBen Gardon 1524531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1525a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 15263ad93562SKeqian Zhu spte_set |= write_protect_gfn(kvm, root, gfn, min_level); 1527a3f15bdaSSean Christopherson 152846044f72SBen Gardon return spte_set; 152946044f72SBen Gardon } 153046044f72SBen Gardon 153195fb5b02SBen Gardon /* 153295fb5b02SBen Gardon * Return the level of the lowest level SPTE added to sptes. 153395fb5b02SBen Gardon * That SPTE may be non-present. 1534c5c8c7c5SDavid Matlack * 1535c5c8c7c5SDavid Matlack * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}. 
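 *
 * sptes[] is indexed by SPTE level: on return, sptes[leaf] through
 * sptes[*root_level] hold the SPTEs encountered on the walk, with
 * higher indices corresponding to levels closer to the root.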
153695fb5b02SBen Gardon */ 153739b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, 153839b4d43eSSean Christopherson int *root_level) 153995fb5b02SBen Gardon { 154095fb5b02SBen Gardon struct tdp_iter iter; 154195fb5b02SBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu; 154295fb5b02SBen Gardon gfn_t gfn = addr >> PAGE_SHIFT; 15432aa07893SSean Christopherson int leaf = -1; 154495fb5b02SBen Gardon 154539b4d43eSSean Christopherson *root_level = vcpu->arch.mmu->shadow_root_level; 154695fb5b02SBen Gardon 154795fb5b02SBen Gardon tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { 154895fb5b02SBen Gardon leaf = iter.level; 1549dde81f94SSean Christopherson sptes[leaf] = iter.old_spte; 155095fb5b02SBen Gardon } 155195fb5b02SBen Gardon 155295fb5b02SBen Gardon return leaf; 155395fb5b02SBen Gardon } 15546e8eb206SDavid Matlack 15556e8eb206SDavid Matlack /* 15566e8eb206SDavid Matlack * Returns the last level spte pointer of the shadow page walk for the given 15576e8eb206SDavid Matlack * gpa, and sets *spte to the spte value. This spte may be non-present. If no 15586e8eb206SDavid Matlack * walk could be performed, returns NULL and *spte does not contain valid data. 15596e8eb206SDavid Matlack * 15606e8eb206SDavid Matlack * Contract: 15616e8eb206SDavid Matlack * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}. 15626e8eb206SDavid Matlack * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end. 15636e8eb206SDavid Matlack * 15646e8eb206SDavid Matlack * WARNING: This function is only intended to be called during fast_page_fault. 15656e8eb206SDavid Matlack */ 15666e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, 15676e8eb206SDavid Matlack u64 *spte) 15686e8eb206SDavid Matlack { 15696e8eb206SDavid Matlack struct tdp_iter iter; 15706e8eb206SDavid Matlack struct kvm_mmu *mmu = vcpu->arch.mmu; 15716e8eb206SDavid Matlack gfn_t gfn = addr >> PAGE_SHIFT; 15726e8eb206SDavid Matlack tdp_ptep_t sptep = NULL; 15736e8eb206SDavid Matlack 15746e8eb206SDavid Matlack tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { 15756e8eb206SDavid Matlack *spte = iter.old_spte; 15766e8eb206SDavid Matlack sptep = iter.sptep; 15776e8eb206SDavid Matlack } 15786e8eb206SDavid Matlack 15796e8eb206SDavid Matlack /* 15806e8eb206SDavid Matlack * Perform the rcu_dereference to get the raw spte pointer value since 15816e8eb206SDavid Matlack * we are passing it up to fast_page_fault, which is shared with the 15826e8eb206SDavid Matlack * legacy MMU and thus does not retain the TDP MMU-specific __rcu 15836e8eb206SDavid Matlack * annotation. 15846e8eb206SDavid Matlack * 15856e8eb206SDavid Matlack * This is safe since fast_page_fault obeys the contracts of this 15866e8eb206SDavid Matlack * function as well as all TDP MMU contracts around modifying SPTEs 15876e8eb206SDavid Matlack * outside of mmu_lock. 15886e8eb206SDavid Matlack */ 15896e8eb206SDavid Matlack return rcu_dereference(sptep); 15906e8eb206SDavid Matlack } 1591