// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after prev_root (or the first valid root if
 * prev_root is NULL), takes a reference on it, and returns that next root.
 * If prev_root is not NULL, this thread should have already taken a
 * reference on it, and that reference will be dropped. If no valid root is
 * found, this function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,	\
				lockdep_is_held_type(&kvm->mmu_lock, 0) || \
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock)) \
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else
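
/*
 * Both iterators filter on the address space ID via the trailing if/else, so
 * the loop body only sees roots belonging to _as_id. A minimal usage sketch,
 * mirroring how the yield-safe variant is used later in this file:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush,
 *				      false);
 */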

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.has_4_byte_gpte = false;
	role.access = ACC_ALL;
	role.ad_disabled = !shadow_accessed_mask;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}
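
/*
 * Returns the physical address of the vCPU's TDP MMU root page table,
 * reusing an existing root with a matching role when one exists and
 * allocating a new one otherwise. Must be called with the MMU lock held
 * for write.
 */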
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep = rcu_dereference(pt) + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_child_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed, then another thread
			 * handling a page fault could overwrite it, so
			 * retry setting the SPTE until it transitions from
			 * some other value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}
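
/*
 * Wrapper around __handle_changed_spte() that also performs the accessed
 * and dirty bookkeeping for the change.
 */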
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}
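
/*
 * tdp_mmu_zap_spte_atomic - zap an SPTE under the read lock by first
 * freezing it to the special REMOVED_SPTE value, flushing remote TLBs, and
 * only then clearing it, so that no other thread can install a present
 * entry while stale translations might still be cached.
 */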
static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}
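
/*
 * Convenience wrappers around __tdp_mmu_set_spte(). Callers that perform
 * their own accessed or dirty bookkeeping use the _no_acc_track and
 * _no_dirty_log variants to avoid double counting.
 */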
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)
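
/*
 * A minimal usage sketch for the SPTE iterators, mirroring
 * kvm_tdp_mmu_handle_gfn() later in this file: visit every present leaf
 * SPTE of a root in [start, end) and accumulate the handlers' results.
 *
 *	tdp_root_for_each_leaf_pte(iter, root, start, end)
 *		ret |= handler(kvm, &iter, range);
 */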

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
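
/*
 * Returns the next root after @prev_root (or the first root if @prev_root
 * is NULL) that has been marked invalid but still has a non-zero refcount,
 * i.e. is still awaiting teardown. Must be called under the RCU read lock;
 * unlike tdp_mmu_next_root(), no reference is taken on the returned root.
 */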
static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_invalidate_all_roots has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_all_roots().
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots
 * is done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}
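
/*
 * A sketch of the intended calling sequence for the two functions above
 * (in KVM this is driven by the fast-zap path in mmu.c; the exact caller
 * lives outside this file): invalidate every root under the write lock,
 * then zap the invalidated roots under the read lock so vCPUs can make
 * progress in parallel.
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);
 *	write_unlock(&kvm->mmu_lock);
 *
 *	read_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);
 *	read_unlock(&kvm->mmu_lock);
 */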

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					   struct kvm_page_fault *fault,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
	u64 new_spte;
	int ret = RET_PF_FIXED;
	bool wrprot = false;

	WARN_ON(sp->role.level != fault->goal_level);
	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
				   fault->pfn, iter->old_spte, fault->prefetch,
				   true, fault->map_writable, &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (wrprot) {
		if (fault->write)
			ret = RET_PF_EMULATE;
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		if (iter.level == fault->goal_level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}
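
		/*
		 * A non-present SPTE above the fault's goal level means a
		 * child page table must be installed before the walk can
		 * descend further.
		 */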
		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and free.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp,
						  fault->huge_page_disallowed &&
						  fault->req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != fault->goal_level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
		flush = zap_gfn_range(kvm, root, range->start, range->end,
				      range->may_block, flush, false);

	return flush;
}
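
/*
 * A tdp_handler_t is invoked on each present leaf SPTE that
 * kvm_tdp_mmu_handle_gfn() finds in the notifier's GFN range; the handlers'
 * return values are OR'd together and propagated back to the MMU notifier
 * caller.
 */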
typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}
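
/*
 * Update a single 4K SPTE in response to a changed_pte notifier event: zap
 * the old SPTE and, if the new host PTE is read-only, install a new SPTE
 * carrying the updated PFN.
 */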
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11273039bcc7SSean Christopherson 
11283039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
11293039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
11303039bcc7SSean Christopherson 		return false;
11313039bcc7SSean Christopherson 
11323039bcc7SSean Christopherson 	/*
11333039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
11343039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
11353039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
11363039bcc7SSean Christopherson 	 * See __handle_changed_spte().
11373039bcc7SSean Christopherson 	 */
11383039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
11393039bcc7SSean Christopherson 
11403039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
11413039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
11423039bcc7SSean Christopherson 								  pte_pfn(range->pte));
11433039bcc7SSean Christopherson 
11443039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
11453039bcc7SSean Christopherson 	}
11463039bcc7SSean Christopherson 
11473039bcc7SSean Christopherson 	return true;
1148f8e14497SBen Gardon }
11491d8dd6b3SBen Gardon 
11501d8dd6b3SBen Gardon /*
11511d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
11521d8dd6b3SBen Gardon  * data is a pointer to the new pte_t mapping the HVA specified by the MMU
11531d8dd6b3SBen Gardon  * notifier.
11541d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
11551d8dd6b3SBen Gardon  */
11563039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
11571d8dd6b3SBen Gardon {
11583039bcc7SSean Christopherson 	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
11591d8dd6b3SBen Gardon 
11603039bcc7SSean Christopherson 	/* FIXME: return 'flush' instead of flushing here. */
11613039bcc7SSean Christopherson 	if (flush)
11623039bcc7SSean Christopherson 		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
11637cca2d0bSBen Gardon 
11643039bcc7SSean Christopherson 	return false;
11651d8dd6b3SBen Gardon }
11661d8dd6b3SBen Gardon 
1167a6a0b05dSBen Gardon /*
1168bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1169bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1170bedd9195SDavid Matlack  * be flushed.
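 *
 * Editor's sketch of the flush contract (illustrative, not kernel source):
 *
 *	if (wrprot_gfn_range(kvm, root, start, end, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);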
1171a6a0b05dSBen Gardon  */
1172a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1173a6a0b05dSBen Gardon 			     gfn_t start, gfn_t end, int min_level)
1174a6a0b05dSBen Gardon {
1175a6a0b05dSBen Gardon 	struct tdp_iter iter;
1176a6a0b05dSBen Gardon 	u64 new_spte;
1177a6a0b05dSBen Gardon 	bool spte_set = false;
1178a6a0b05dSBen Gardon 
11797cca2d0bSBen Gardon 	rcu_read_lock();
11807cca2d0bSBen Gardon 
1181a6a0b05dSBen Gardon 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1182a6a0b05dSBen Gardon 
1183a6a0b05dSBen Gardon 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1184a6a0b05dSBen Gardon 				   min_level, start, end) {
118524ae4cfaSBen Gardon retry:
118624ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
11871af4a960SBen Gardon 			continue;
11881af4a960SBen Gardon 
1189a6a0b05dSBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
11900f99ee2cSBen Gardon 		    !is_last_spte(iter.old_spte, iter.level) ||
11910f99ee2cSBen Gardon 		    !(iter.old_spte & PT_WRITABLE_MASK))
1192a6a0b05dSBen Gardon 			continue;
1193a6a0b05dSBen Gardon 
1194a6a0b05dSBen Gardon 		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1195a6a0b05dSBen Gardon 
11966ccf4438SPaolo Bonzini 		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
119724ae4cfaSBen Gardon 			/*
119824ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
119924ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
120024ae4cfaSBen Gardon 			 */
120124ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
120224ae4cfaSBen Gardon 			goto retry;
120324ae4cfaSBen Gardon 		}
1204a6a0b05dSBen Gardon 		spte_set = true;
1205a6a0b05dSBen Gardon 	}
12067cca2d0bSBen Gardon 
12077cca2d0bSBen Gardon 	rcu_read_unlock();
1208a6a0b05dSBen Gardon 	return spte_set;
1209a6a0b05dSBen Gardon }
1210a6a0b05dSBen Gardon 
1211a6a0b05dSBen Gardon /*
1212a6a0b05dSBen Gardon  * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1213a6a0b05dSBen Gardon  * only affect leaf SPTEs down to min_level.
1214a6a0b05dSBen Gardon  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1215a6a0b05dSBen Gardon  */
1216269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1217269e9552SHamza Mahfooz 			     const struct kvm_memory_slot *slot, int min_level)
1218a6a0b05dSBen Gardon {
1219a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1220a6a0b05dSBen Gardon 	bool spte_set = false;
1221a6a0b05dSBen Gardon 
122224ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1223a6a0b05dSBen Gardon 
122424ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1225a6a0b05dSBen Gardon 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1226a6a0b05dSBen Gardon 			     slot->base_gfn + slot->npages, min_level);
1227a6a0b05dSBen Gardon 
1228a6a0b05dSBen Gardon 	return spte_set;
1229a6a0b05dSBen Gardon }
1230a6a0b05dSBen Gardon 
1231a6a0b05dSBen Gardon /*
1232a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1233a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1234a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1235a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1236a6a0b05dSBen Gardon  * be flushed.
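 *
 * Editor's sketch of the per-SPTE decision made below (illustrative only):
 *
 *	if (spte_ad_need_write_protect(spte))
 *		new_spte = spte & ~PT_WRITABLE_MASK;	(no A/D bits: clear W)
 *	else
 *		new_spte = spte & ~shadow_dirty_mask;	(A/D bits: clear dirty)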
1237a6a0b05dSBen Gardon  */
1238a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1239a6a0b05dSBen Gardon 				  gfn_t start, gfn_t end)
1240a6a0b05dSBen Gardon {
1241a6a0b05dSBen Gardon 	struct tdp_iter iter;
1242a6a0b05dSBen Gardon 	u64 new_spte;
1243a6a0b05dSBen Gardon 	bool spte_set = false;
1244a6a0b05dSBen Gardon 
12457cca2d0bSBen Gardon 	rcu_read_lock();
12467cca2d0bSBen Gardon 
1247a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
124824ae4cfaSBen Gardon retry:
124924ae4cfaSBen Gardon 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12501af4a960SBen Gardon 			continue;
12511af4a960SBen Gardon 
1252a6a0b05dSBen Gardon 		if (spte_ad_need_write_protect(iter.old_spte)) {
1253a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1254a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1255a6a0b05dSBen Gardon 			else
1256a6a0b05dSBen Gardon 				continue;
1257a6a0b05dSBen Gardon 		} else {
1258a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1259a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1260a6a0b05dSBen Gardon 			else
1261a6a0b05dSBen Gardon 				continue;
1262a6a0b05dSBen Gardon 		}
1263a6a0b05dSBen Gardon 
12646ccf4438SPaolo Bonzini 		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
126524ae4cfaSBen Gardon 			/*
126624ae4cfaSBen Gardon 			 * The iter must explicitly re-read the SPTE because
126724ae4cfaSBen Gardon 			 * the atomic cmpxchg failed.
126824ae4cfaSBen Gardon 			 */
126924ae4cfaSBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
127024ae4cfaSBen Gardon 			goto retry;
127124ae4cfaSBen Gardon 		}
1272a6a0b05dSBen Gardon 		spte_set = true;
1273a6a0b05dSBen Gardon 	}
12747cca2d0bSBen Gardon 
12757cca2d0bSBen Gardon 	rcu_read_unlock();
1276a6a0b05dSBen Gardon 	return spte_set;
1277a6a0b05dSBen Gardon }
1278a6a0b05dSBen Gardon 
1279a6a0b05dSBen Gardon /*
1280a6a0b05dSBen Gardon  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1281a6a0b05dSBen Gardon  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1282a6a0b05dSBen Gardon  * If AD bits are not enabled, this will require clearing the writable bit on
1283a6a0b05dSBen Gardon  * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1284a6a0b05dSBen Gardon  * be flushed.
1285a6a0b05dSBen Gardon  */
1286269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1287269e9552SHamza Mahfooz 				  const struct kvm_memory_slot *slot)
1288a6a0b05dSBen Gardon {
1289a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1290a6a0b05dSBen Gardon 	bool spte_set = false;
1291a6a0b05dSBen Gardon 
129224ae4cfaSBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
1293a6a0b05dSBen Gardon 
129424ae4cfaSBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1295a6a0b05dSBen Gardon 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1296a6a0b05dSBen Gardon 				  slot->base_gfn + slot->npages);
1297a6a0b05dSBen Gardon 
1298a6a0b05dSBen Gardon 	return spte_set;
1299a6a0b05dSBen Gardon }
1300a6a0b05dSBen Gardon 
1301a6a0b05dSBen Gardon /*
1302a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1303a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1304a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1305a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1306a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1307a6a0b05dSBen Gardon  */
1308a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1309a6a0b05dSBen Gardon 				  gfn_t gfn, unsigned long mask, bool wrprot)
1310a6a0b05dSBen Gardon {
1311a6a0b05dSBen Gardon 	struct tdp_iter iter;
1312a6a0b05dSBen Gardon 	u64 new_spte;
1313a6a0b05dSBen Gardon 
13147cca2d0bSBen Gardon 	rcu_read_lock();
13157cca2d0bSBen Gardon 
1316a6a0b05dSBen Gardon 	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1317a6a0b05dSBen Gardon 				    gfn + BITS_PER_LONG) {
1318a6a0b05dSBen Gardon 		if (!mask)
1319a6a0b05dSBen Gardon 			break;
1320a6a0b05dSBen Gardon 
1321a6a0b05dSBen Gardon 		if (iter.level > PG_LEVEL_4K ||
1322a6a0b05dSBen Gardon 		    !(mask & (1UL << (iter.gfn - gfn))))
1323a6a0b05dSBen Gardon 			continue;
1324a6a0b05dSBen Gardon 
1325f1b3b06aSBen Gardon 		mask &= ~(1UL << (iter.gfn - gfn));
1326f1b3b06aSBen Gardon 
1327a6a0b05dSBen Gardon 		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1328a6a0b05dSBen Gardon 			if (is_writable_pte(iter.old_spte))
1329a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1330a6a0b05dSBen Gardon 			else
1331a6a0b05dSBen Gardon 				continue;
1332a6a0b05dSBen Gardon 		} else {
1333a6a0b05dSBen Gardon 			if (iter.old_spte & shadow_dirty_mask)
1334a6a0b05dSBen Gardon 				new_spte = iter.old_spte & ~shadow_dirty_mask;
1335a6a0b05dSBen Gardon 			else
1336a6a0b05dSBen Gardon 				continue;
1337a6a0b05dSBen Gardon 		}
1338a6a0b05dSBen Gardon 
1339a6a0b05dSBen Gardon 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1340a6a0b05dSBen Gardon 	}
13417cca2d0bSBen Gardon 
13427cca2d0bSBen Gardon 	rcu_read_unlock();
1343a6a0b05dSBen Gardon }
1344a6a0b05dSBen Gardon 
1345a6a0b05dSBen Gardon /*
1346a6a0b05dSBen Gardon  * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1347a6a0b05dSBen Gardon  * set in mask, starting at gfn. The given memslot is expected to contain all
1348a6a0b05dSBen Gardon  * the GFNs represented by set bits in the mask. If AD bits are enabled,
1349a6a0b05dSBen Gardon  * clearing the dirty status will involve clearing the dirty bit on each SPTE
1350a6a0b05dSBen Gardon  * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1351a6a0b05dSBen Gardon  */
1352a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1353a6a0b05dSBen Gardon 				       struct kvm_memory_slot *slot,
1354a6a0b05dSBen Gardon 				       gfn_t gfn, unsigned long mask,
1355a6a0b05dSBen Gardon 				       bool wrprot)
1356a6a0b05dSBen Gardon {
1357a6a0b05dSBen Gardon 	struct kvm_mmu_page *root;
1358a6a0b05dSBen Gardon 
1359531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1360a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
1361a6a0b05dSBen Gardon 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1362a6a0b05dSBen Gardon }
1363a6a0b05dSBen Gardon 
1364a6a0b05dSBen Gardon /*
136587aa9ec9SBen Gardon  * Clear leaf entries which could be replaced by large mappings, for
136687aa9ec9SBen Gardon  * GFNs within the slot.
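 *
 * Editor's note (illustrative): this is the cleanup pass run after dirty
 * logging is disabled on a slot. For example, a 2M region that was mapped
 * with 512 4K SPTEs for logging can, once its leaf SPTEs are zapped here,
 * be refaulted as a single 2M SPTE.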
136714881998SBen Gardon  */
13684b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
136914881998SBen Gardon 				       struct kvm_mmu_page *root,
13704b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
137114881998SBen Gardon {
13729eba50f8SSean Christopherson 	gfn_t start = slot->base_gfn;
13739eba50f8SSean Christopherson 	gfn_t end = start + slot->npages;
137414881998SBen Gardon 	struct tdp_iter iter;
137514881998SBen Gardon 	kvm_pfn_t pfn;
137614881998SBen Gardon 
13777cca2d0bSBen Gardon 	rcu_read_lock();
13787cca2d0bSBen Gardon 
137914881998SBen Gardon 	tdp_root_for_each_pte(iter, root, start, end) {
13802db6f772SBen Gardon retry:
13814b85c921SSean Christopherson 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
13821af4a960SBen Gardon 			continue;
13831af4a960SBen Gardon 
138414881998SBen Gardon 		if (!is_shadow_present_pte(iter.old_spte) ||
138587aa9ec9SBen Gardon 		    !is_last_spte(iter.old_spte, iter.level))
138614881998SBen Gardon 			continue;
138714881998SBen Gardon 
138814881998SBen Gardon 		pfn = spte_to_pfn(iter.old_spte);
138914881998SBen Gardon 		if (kvm_is_reserved_pfn(pfn) ||
13909eba50f8SSean Christopherson 		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
13919eba50f8SSean Christopherson 							    pfn, PG_LEVEL_NUM))
139214881998SBen Gardon 			continue;
139314881998SBen Gardon 
13944b85c921SSean Christopherson 		/* Note, a successful atomic zap also does a remote TLB flush. */
13952db6f772SBen Gardon 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
13962db6f772SBen Gardon 			/*
13972db6f772SBen Gardon 			 * The iter must explicitly re-read the SPTE because
13982db6f772SBen Gardon 			 * the atomic cmpxchg failed.
13992db6f772SBen Gardon 			 */
14002db6f772SBen Gardon 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
14012db6f772SBen Gardon 			goto retry;
14022db6f772SBen Gardon 		}
140314881998SBen Gardon 	}
140414881998SBen Gardon 
14057cca2d0bSBen Gardon 	rcu_read_unlock();
140614881998SBen Gardon }
140714881998SBen Gardon 
140814881998SBen Gardon /*
140914881998SBen Gardon  * Clear non-leaf entries (and free associated page tables) which could
141014881998SBen Gardon  * be replaced by large mappings, for GFNs within the slot.
141114881998SBen Gardon  */
14124b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
14134b85c921SSean Christopherson 				       const struct kvm_memory_slot *slot)
141414881998SBen Gardon {
141514881998SBen Gardon 	struct kvm_mmu_page *root;
141614881998SBen Gardon 
14172db6f772SBen Gardon 	lockdep_assert_held_read(&kvm->mmu_lock);
141814881998SBen Gardon 
14192db6f772SBen Gardon 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
14204b85c921SSean Christopherson 		zap_collapsible_spte_range(kvm, root, slot);
142114881998SBen Gardon }
142246044f72SBen Gardon 
142346044f72SBen Gardon /*
142446044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14255fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
142646044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
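 *
 * Illustrative caller contract (editor's sketch, not kernel source):
 *
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);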
142746044f72SBen Gardon  */
142846044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
14293ad93562SKeqian Zhu 			      gfn_t gfn, int min_level)
143046044f72SBen Gardon {
143146044f72SBen Gardon 	struct tdp_iter iter;
143246044f72SBen Gardon 	u64 new_spte;
143346044f72SBen Gardon 	bool spte_set = false;
143446044f72SBen Gardon 
14353ad93562SKeqian Zhu 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
14363ad93562SKeqian Zhu 
14377cca2d0bSBen Gardon 	rcu_read_lock();
14387cca2d0bSBen Gardon 
14393ad93562SKeqian Zhu 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
14403ad93562SKeqian Zhu 				   min_level, gfn, gfn + 1) {
14413ad93562SKeqian Zhu 		if (!is_shadow_present_pte(iter.old_spte) ||
14423ad93562SKeqian Zhu 		    !is_last_spte(iter.old_spte, iter.level))
14433ad93562SKeqian Zhu 			continue;
14443ad93562SKeqian Zhu 
144546044f72SBen Gardon 		new_spte = iter.old_spte &
14465fc3424fSSean Christopherson 			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
144746044f72SBen Gardon 
1448*7c8a4742SDavid Matlack 		if (new_spte == iter.old_spte)
1449*7c8a4742SDavid Matlack 			break;
1450*7c8a4742SDavid Matlack 
145146044f72SBen Gardon 		tdp_mmu_set_spte(kvm, &iter, new_spte);
145246044f72SBen Gardon 		spte_set = true;
145346044f72SBen Gardon 	}
145446044f72SBen Gardon 
14557cca2d0bSBen Gardon 	rcu_read_unlock();
14567cca2d0bSBen Gardon 
145746044f72SBen Gardon 	return spte_set;
145846044f72SBen Gardon }
145946044f72SBen Gardon 
146046044f72SBen Gardon /*
146146044f72SBen Gardon  * Removes write access on the last level SPTE mapping this GFN and unsets the
14625fc3424fSSean Christopherson  * MMU-writable bit to ensure future writes continue to be intercepted.
146346044f72SBen Gardon  * Returns true if an SPTE was set and a TLB flush is needed.
146446044f72SBen Gardon  */
146546044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
14663ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
14673ad93562SKeqian Zhu 				   int min_level)
146846044f72SBen Gardon {
146946044f72SBen Gardon 	struct kvm_mmu_page *root;
147046044f72SBen Gardon 	bool spte_set = false;
147146044f72SBen Gardon 
1472531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1473a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
14743ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1475a3f15bdaSSean Christopherson 
147646044f72SBen Gardon 	return spte_set;
147746044f72SBen Gardon }
147846044f72SBen Gardon 
147995fb5b02SBen Gardon /*
148095fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
148195fb5b02SBen Gardon  * That SPTE may be non-present.
1482c5c8c7c5SDavid Matlack  *
1483c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
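 *
 * Illustrative call pattern (editor's sketch; the MMIO fast path in mmu.c
 * uses this helper in essentially this shape, array size assumed):
 *
 *	int leaf, root_level;
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();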
148495fb5b02SBen Gardon  */
148539b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
148639b4d43eSSean Christopherson 			 int *root_level)
148795fb5b02SBen Gardon {
148895fb5b02SBen Gardon 	struct tdp_iter iter;
148995fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
149095fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
14912aa07893SSean Christopherson 	int leaf = -1;
149295fb5b02SBen Gardon 
149339b4d43eSSean Christopherson 	*root_level = vcpu->arch.mmu->shadow_root_level;
149495fb5b02SBen Gardon 
149595fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
149695fb5b02SBen Gardon 		leaf = iter.level;
1497dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
149895fb5b02SBen Gardon 	}
149995fb5b02SBen Gardon 
150095fb5b02SBen Gardon 	return leaf;
150195fb5b02SBen Gardon }
15026e8eb206SDavid Matlack 
15036e8eb206SDavid Matlack /*
15046e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
15056e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
15066e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
15076e8eb206SDavid Matlack  *
15086e8eb206SDavid Matlack  * Contract:
15096e8eb206SDavid Matlack  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
15106e8eb206SDavid Matlack  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
15116e8eb206SDavid Matlack  *
15126e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
15136e8eb206SDavid Matlack  */
15146e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
15156e8eb206SDavid Matlack 					u64 *spte)
15166e8eb206SDavid Matlack {
15176e8eb206SDavid Matlack 	struct tdp_iter iter;
15186e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
15196e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
15206e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
15216e8eb206SDavid Matlack 
15226e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
15236e8eb206SDavid Matlack 		*spte = iter.old_spte;
15246e8eb206SDavid Matlack 		sptep = iter.sptep;
15256e8eb206SDavid Matlack 	}
15266e8eb206SDavid Matlack 
15276e8eb206SDavid Matlack 	/*
15286e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
15296e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
15306e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
15316e8eb206SDavid Matlack 	 * annotation.
15326e8eb206SDavid Matlack 	 *
15336e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
15346e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
15356e8eb206SDavid Matlack 	 * outside of mmu_lock.
15366e8eb206SDavid Matlack 	 */
15376e8eb206SDavid Matlack 	return rcu_dereference(sptep);
15386e8eb206SDavid Matlack }
1539
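/*
 * Editor's addendum (illustrative sketch, not kernel source): the intended
 * consumer of kvm_tdp_mmu_fast_pf_get_last_sptep() is fast_page_fault() in
 * mmu.c, which uses it roughly as follows:
 *
 *	u64 spte;
 *	u64 *sptep;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
 *	...
 *	walk_shadow_page_lockless_end(vcpu);
 */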