// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read-side critical
 * section, and freeing it after a grace period, lockless accessors are
 * guaranteed never to use the memory after it has been freed.
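 *
 * An illustrative (hypothetical) lockless reader that this scheme protects:
 *
 *	rcu_read_lock();
 *	sptep = rcu_dereference(pt) + i;
 *	spte = READ_ONCE(*sptep);	<- page table memory still valid
 *	rcu_read_unlock();		<- a grace period may now elapse,
 *					   allowing the deferred free to run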
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/*
 * Finds the next valid root after @prev_root (or the first valid root if
 * @prev_root is NULL), takes a reference on it, and returns that next root.
 * If @prev_root is not NULL, this thread should have already taken a
 * reference on it, and that reference will be dropped. If no valid root
 * is found, this function will return NULL.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
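 *
 * Typical use, an illustrative sketch mirroring the callers later in this
 * file:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush,
 *				      shared);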
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared);		\
	     _root;							\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared))		\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,	\
			lockdep_is_held_type(&_kvm->mmu_lock, 0) ||	\
			lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock)) \
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/*
	 * Check for an existing root before allocating a new one.
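	 * Roots are refcounted and shared by vCPUs whose MMU role matches;
	 * kvm_tdp_mmu_get_root() fails for a root that is already being
	 * torn down, in which case a new root is allocated below.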
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(kvm, root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 * @account_nx: This page replaces an NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared, bool account_nx)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
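 *
 * A note on the two clearing strategies below (a descriptive summary of
 * the code, not new behavior): under the read lock each child SPTE is
 * frozen with an atomic xchg() to REMOVED_SPTE so that a concurrent
 * page-fault handler cannot re-install it; with the MMU lock held
 * exclusively, a plain WRITE_ONCE() is sufficient.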
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = rcu_dereference(pt) + i;
		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * retry the xchg() until the value it replaces is
			 * something other than the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level - 1,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of an SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
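	 * E.g. the safe sequence for changing the PFN mapped by a leaf SPTE
	 * is: zap the old SPTE (making it non-present), flush the TLBs, and
	 * only then install the new SPTE; tdp_mmu_zap_spte_atomic() is one
	 * helper implementing the first two steps.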
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
		if (is_large_pte(old_spte))
			atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
		else
			atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
	}

	/*
	 * The only time an SPTE should be changed from one non-present state
	 * to another is when an MMIO entry is installed/modified/removed.
	 * In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve an MMIO SPTE or removed
		 * SPTE, it is unexpected. Log the change, though it should
		 * not impact the guest since both the former and current
		 * SPTEs are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping, but do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is
 *	    returned, this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
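	 * The REMOVED_SPTE value effectively acts as a lock bit: only the
	 * thread that installed it, e.g. via tdp_mmu_zap_spte_atomic(), may
	 * write the SPTE's final value.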
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
				      iter->old_spte, new_spte, iter->level);
	return true;
}

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else
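
/*
 * Illustrative sketch of the leaf-only iterator above (a hypothetical
 * caller, not code from this file), e.g. counting a root's present leaf
 * mappings; non-present and non-leaf SPTEs are skipped by the macro itself:
 *
 *	tdp_root_for_each_leaf_pte(iter, root, start, end)
 *		count++;
 */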

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush,
					     bool shared)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_restart(iter);

		return true;
	}

	return false;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently.
 * If shared is false, this thread should hold the MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	struct tdp_iter iter;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}
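
/*
 * A note on the flush bookkeeping above (descriptive, not new behavior):
 * tdp_mmu_iter_cond_resched() performs the TLB flush itself when it yields,
 * so zap_gfn_range() may clear a pending flush. Callers must therefore
 * chain the *returned* flush value rather than reuse the one they passed
 * in, e.g.:
 *
 *	flush = zap_gfn_range(kvm, root, start, end, true, flush, shared);
 */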

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      shared);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
						  flush, false);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
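 *
 * The expected calling sequence (a summary based on the lockdep assertions
 * in this file): kvm_tdp_mmu_invalidate_all_roots() first marks the roots
 * invalid under the write lock, then this function tears them down while
 * holding the MMU lock only for read.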
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
				      true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_all_roots().
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					   int map_writable,
					   struct tdp_iter *iter,
					   kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = RET_PF_FIXED;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					  pfn, iter->old_spte, prefault, true,
					  map_writable, !shadow_accessed_mask,
					  &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If an MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
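 *
 * Rough flow, summarizing the loop below: walk down from the root toward
 * the target level, zapping any large SPTE that overshoots the target and
 * installing missing non-leaf SPTEs along the way, then install the final
 * leaf SPTE via tdp_mmu_map_handle_target_level().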
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If the SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and freeing.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
						    new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp, true,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
		flush |= zap_gfn_range(kvm, root, range->start, range->end,
				       range->may_block, flush, false);

	return flush;
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}

/*
 * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and
 * return non-zero if any of them had been accessed.
 */
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
			  struct kvm_gfn_range *range)
{
	u64 new_spte = 0;

	/* If we have a non-accessed entry we don't need to change the pte. */
	if (!is_accessed_spte(iter->old_spte))
		return false;

	new_spte = iter->old_spte;

	if (spte_ad_enabled(new_spte)) {
		new_spte &= ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(new_spte))
			kvm_set_pfn_dirty(spte_to_pfn(new_spte));

		new_spte = mark_spte_for_access_track(new_spte);
	}

	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);

	return true;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}

static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	return is_accessed_spte(iter->old_spte);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}

static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/*
	 * Huge pages aren't expected to be modified without first being
	 * zapped.
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	u64 new_spte;

	/* Huge pages aren't expected to be modified without first being zapped. */
	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);

	if (iter->level != PG_LEVEL_4K ||
	    !is_shadow_present_pte(iter->old_spte))
		return false;

	/*
	 * Note, when changing a read-only SPTE, it's not strictly necessary to
	 * zero the SPTE before setting the new PFN, but doing so preserves the
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See __handle_changed_spte().
	 */
	tdp_mmu_set_spte(kvm, iter, 0);

	if (!pte_write(range->pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->pte));

		tdp_mmu_set_spte(kvm, iter, new_spte);
	}

	return true;
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * range->pte holds the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Always returns false, as any TLB flush needed is performed here directly
 * (see the FIXME below).
 */
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);

	/* FIXME: return 'flush' instead of flushing here. */
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);

	return false;
}

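/*
 * Illustrative sketch (hypothetical values) of the SPTE states produced by
 * set_spte_gfn() above for a read-only change_pte notification moving the
 * mapping from pfn A to pfn B:
 *
 *	before:				present, writable, pfn = A
 *	tdp_mmu_set_spte(.., 0):	!present	(zap first)
 *	tdp_mmu_set_spte(.., new_spte):	present, read-only, pfn = B
 *
 * Zapping before installing the new PFN is what preserves the invariant
 * that the PFN of a present leaf SPTE never changes.
 */
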
/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

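/*
 * Illustrative usage sketch (simplified; the real call sites live in mmu.c's
 * dirty-logging enablement path): write protection runs with the MMU lock
 * held in read mode, and the TLBs are flushed only if an SPTE changed:
 *
 *	read_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
 *	read_unlock(&kvm->mmu_lock);
 *	if (flush)
 *		kvm_flush_remote_tlbs_with_address(kvm, slot->base_gfn,
 *						   slot->npages);
 */
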
/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
							  new_spte)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				  slot->base_gfn + slot->npages);

	return spte_set;
}

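/*
 * The two clearing strategies above in SPTE terms (illustrative only):
 *
 *	// A/D bits in use: drop only the dirty bit; the page stays writable
 *	// and the next write redirties it without faulting.
 *	new_spte = iter.old_spte & ~shadow_dirty_mask;
 *
 *	// A/D bits unusable for this SPTE: drop the writable bit so the
 *	// next write faults and the fault path marks the page dirty.
 *	new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
 */
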
/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

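/*
 * Worked example (hypothetical values) for the mask handling above: with
 * gfn = 0x1000 and mask = 0b10100, the walk starts at
 * gfn + __ffs(mask) = 0x1002 and is bounded by gfn + BITS_PER_LONG.
 * Bits are cleared as their GFNs are processed:
 *
 *	iter.gfn == 0x1002: mask 0b10100 -> 0b10000
 *	iter.gfn == 0x1004: mask 0b10000 -> 0
 *
 * Once the mask hits zero the loop breaks, so GFNs past the last set bit
 * are never walked.
 */
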
/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static bool zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	kvm_pfn_t pfn;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
							    pfn, PG_LEVEL_NUM))
			continue;

		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		flush = true;
	}

	rcu_read_unlock();

	return flush;
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		flush = zap_collapsible_spte_range(kvm, root, slot, flush);

	return flush;
}

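/*
 * Illustrative usage sketch (simplified): when dirty logging is turned off
 * for a slot, zapping the leftover 4K SPTEs lets subsequent faults rebuild
 * the range with huge pages:
 *
 *	read_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
 *	if (flush)
 *		kvm_flush_remote_tlbs_with_address(kvm, slot->base_gfn,
 *						   slot->npages);
 *	read_unlock(&kvm->mmu_lock);
 */
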
/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

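/*
 * Illustrative usage sketch: a caller that must intercept every write to a
 * single guest page (e.g. one being shadowed) holds the MMU lock in write
 * mode and flushes only if an SPTE changed:
 *
 *	write_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K);
 *	write_unlock(&kvm->mmu_lock);
 *	if (flush)
 *		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
 */
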
/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	rcu_read_unlock();

	return leaf;
}
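
/*
 * Illustrative sketch of consuming the walk above (hypothetical caller in
 * the style of the MMIO-SPTE walker in mmu.c): sptes[] is indexed by level
 * and is valid from the returned leaf level up to *root_level:
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf, level;
 *
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	if (leaf < 0)
 *		return;		// address not mapped by the TDP MMU
 *	for (level = root_level; level >= leaf; level--)
 *		pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
 */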