// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return false;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);

	return true;
}

static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							      bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared);

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
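
/*
 * Root lifecycle as implemented in this file: tdp_mmu_next_root() and
 * kvm_tdp_mmu_get_vcpu_root_hpa() take references on a root,
 * kvm_tdp_mmu_put_root() drops them, and dropping the last reference zaps
 * the root's paging structure and queues the root page for freeing via the
 * RCU callback above.
 */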

/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL).  A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)		\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link,		\
				lockdep_is_held_type(&kvm->mmu_lock, 0) ||	\
				lockdep_is_held(&kvm->arch.tdp_mmu_pages_lock))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else
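
/*
 * Rough usage sketch for the iterators above, mirroring the zap paths later
 * in this file (e.g. __kvm_tdp_mmu_zap_gfn_range()):
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush, shared);
 *
 * The yield-safe variants hold a reference on the current root, so the walk
 * may drop the MMU lock; for_each_tdp_mmu_root() takes no references and
 * requires the MMU lock (or tdp_mmu_pages_lock) to be held across the walk.
 */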

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.has_4_byte_gpte = false;
	role.access = ACC_ALL;
	role.ad_disabled = !shadow_accessed_mask;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	/*
	 * Check for an existing root before allocating a new one.  Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}
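
/*
 * handle_changed_spte_acc_track() and handle_changed_spte_dirty_log() are the
 * accessed/dirty halves of the SPTE-change bookkeeping.  handle_changed_spte()
 * below combines them with __handle_changed_spte(), while __tdp_mmu_set_spte()
 * callers can skip either half via record_acc_track/record_dirty_log.
 */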

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
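
/*
 * Locking note on the two helpers above: tdp_mmu_link_page() always takes
 * tdp_mmu_pages_lock because new pages can be linked while the MMU lock is
 * held only for read, whereas tdp_mmu_unlink_page() takes it only when
 * @shared is true and otherwise relies on the MMU lock being held for write.
 */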

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep = rcu_dereference(pt) + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_child_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
					   KVM_PAGES_PER_HPAGE(level + 1));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*rcu_dereference(iter->sptep), 0);

	return true;
}


/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(iter->old_spte));

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)
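
/*
 * Rough rule for the helpers above, as used in the rest of this file:
 * tdp_mmu_set_spte() and its variants require the MMU lock to be held for
 * write, while tdp_mmu_set_spte_atomic() is used under the read lock and can
 * fail, in which case callers typically re-read the SPTE and retry, e.g.:
 *
 *	if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
 *		iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 *		goto retry;
 *	}
 */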

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 *
 * If shared is true, this thread holds the MMU lock in read mode and must
 * account for the possibility that other threads are modifying the paging
 * structures concurrently. If shared is false, this thread should hold the
 * MMU lock in write mode.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush,
			  bool shared)
{
	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);
	struct tdp_iter iter;

	/*
	 * No need to try to step down in the iterator when zapping all SPTEs,
	 * zapping the top-level non-leaf SPTEs will recurse on their children.
	 */
	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;

	/*
	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
	 * and so KVM will never install a SPTE for such addresses.
	 */
	end = min(end, max_gfn_host);

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
retry:
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level, except when zapping all SPTEs.
		 */
		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (!shared) {
			tdp_mmu_set_spte(kvm, &iter, 0);
			flush = true;
		} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
	}

	rcu_read_unlock();
	return flush;
}
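
/*
 * Flush accumulation pattern used throughout this file: callers pass in the
 * current "flush needed" state, zap_gfn_range() returns an updated value, and
 * the outermost caller (e.g. kvm_tdp_mmu_zap_all() below) performs a single
 * remote TLB flush once the walk completes.
 */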

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
				      false);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	bool flush = false;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
						  struct kvm_mmu_page *prev_root)
{
	struct kvm_mmu_page *next_root;

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root && !(next_root->role.invalid &&
			      refcount_read(&next_root->tdp_mmu_root_count)))
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &next_root->link,
						  typeof(*next_root), link);

	return next_root;
}

/*
 * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
 * invalidated root, they will not be freed until this function drops the
 * reference. Before dropping that reference, tear down the paging
 * structure so that whichever thread does drop the last reference
 * only has to do a trivial amount of work. Since the roots are invalid,
 * no new SPTEs should be created under them.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *next_root;
	struct kvm_mmu_page *root;
	bool flush = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	rcu_read_lock();

	root = next_invalidated_root(kvm, NULL);

	while (root) {
		next_root = next_invalidated_root(kvm, root);

		rcu_read_unlock();

		flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

		/*
		 * Put the reference acquired in
		 * kvm_tdp_mmu_invalidate_roots
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);

		root = next_root;

		rcu_read_lock();
	}

	rcu_read_unlock();

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Mark each TDP MMU root as invalid so that other threads
 * will drop their references and allow the root count to
 * go to 0.
 *
 * Also take a reference on all roots so that this thread
 * can do the bulk of the work required to free the roots
 * once they are invalidated. Without this reference, a
 * vCPU thread might drop the last reference to a root and
 * get stuck with tearing down the entire paging structure.
 *
 * Roots which have a zero refcount should be skipped as
 * they're already being torn down.
 * Already invalid roots should be referenced again so that
 * they aren't freed before kvm_tdp_mmu_zap_all_fast is
 * done with them.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
		if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root->role.invalid = true;
}
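
/*
 * Together, the two functions above implement the "fast zap" flow:
 * kvm_tdp_mmu_invalidate_all_roots() runs with the MMU lock held for write,
 * marks every root invalid and takes a reference on it, and
 * kvm_tdp_mmu_zap_invalidated_roots() later tears those roots down with the
 * MMU lock held only for read, dropping the references as it goes.
 */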

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					   struct kvm_page_fault *fault,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
	u64 new_spte;
	int ret = RET_PF_FIXED;
	bool wrprot = false;

	WARN_ON(sp->role.level != fault->goal_level);
	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
				   fault->pfn, iter->old_spte, fault->prefetch, true,
				   fault->map_writable, &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (wrprot) {
		if (fault->write)
			ret = RET_PF_EMULATE;
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	/*
	 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
	 * consistent with legacy MMU behavior.
	 */
	if (ret != RET_PF_SPURIOUS)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		if (iter.level == fault->goal_level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			/*
			 * If SPTE has been frozen by another thread, just
			 * give up and retry, avoiding unnecessary page table
			 * allocation and free.
			 */
			if (is_removed_spte(iter.old_spte))
				break;

			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp,
						  fault->huge_page_disallowed &&
						  fault->req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != fault->goal_level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
	rcu_read_unlock();

	return ret;
}

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
					   range->end, range->may_block, flush);
}

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
			      struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	rcu_read_lock();

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.
	 */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);
	}

	rcu_read_unlock();

	return ret;
}
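
/*
 * The handlers below (age_gfn_range(), test_age_gfn(), set_spte_gfn()) are
 * all invoked through kvm_tdp_mmu_handle_gfn() above, which walks only the
 * leaf SPTEs covering the notifier range under RCU and without yielding.
 */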
1089f8e14497SBen Gardon */
10903039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
10913039bcc7SSean Christopherson struct kvm_gfn_range *range)
1092f8e14497SBen Gardon {
1093f8e14497SBen Gardon u64 new_spte = 0;
1094f8e14497SBen Gardon
10953039bcc7SSean Christopherson /* If we have a non-accessed entry we don't need to change the pte. */
10963039bcc7SSean Christopherson if (!is_accessed_spte(iter->old_spte))
10973039bcc7SSean Christopherson return false;
10987cca2d0bSBen Gardon
10993039bcc7SSean Christopherson new_spte = iter->old_spte;
1100f8e14497SBen Gardon
1101f8e14497SBen Gardon if (spte_ad_enabled(new_spte)) {
11028f8f52a4SSean Christopherson new_spte &= ~shadow_accessed_mask;
1103f8e14497SBen Gardon } else {
1104f8e14497SBen Gardon /*
1105f8e14497SBen Gardon * Capture the dirty status of the page, so that it doesn't get
1106f8e14497SBen Gardon * lost when the SPTE is marked for access tracking.
1107f8e14497SBen Gardon */
1108f8e14497SBen Gardon if (is_writable_pte(new_spte))
1109f8e14497SBen Gardon kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1110f8e14497SBen Gardon
1111f8e14497SBen Gardon new_spte = mark_spte_for_access_track(new_spte);
1112f8e14497SBen Gardon }
1113f8e14497SBen Gardon
11143039bcc7SSean Christopherson tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
111533dd3574SBen Gardon
11163039bcc7SSean Christopherson return true;
1117f8e14497SBen Gardon }
1118f8e14497SBen Gardon
11193039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1120f8e14497SBen Gardon {
11213039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1122f8e14497SBen Gardon }
1123f8e14497SBen Gardon
11243039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
11253039bcc7SSean Christopherson struct kvm_gfn_range *range)
1126f8e14497SBen Gardon {
11273039bcc7SSean Christopherson return is_accessed_spte(iter->old_spte);
1128f8e14497SBen Gardon }
1129f8e14497SBen Gardon
11303039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1131f8e14497SBen Gardon {
11323039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
11333039bcc7SSean Christopherson }
11343039bcc7SSean Christopherson
11353039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
11363039bcc7SSean Christopherson struct kvm_gfn_range *range)
11373039bcc7SSean Christopherson {
11383039bcc7SSean Christopherson u64 new_spte;
11393039bcc7SSean Christopherson
11403039bcc7SSean Christopherson /* Huge pages aren't expected to be modified without first being zapped. */
11413039bcc7SSean Christopherson WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
11423039bcc7SSean Christopherson
11433039bcc7SSean Christopherson if (iter->level != PG_LEVEL_4K ||
11443039bcc7SSean Christopherson !is_shadow_present_pte(iter->old_spte))
11453039bcc7SSean Christopherson return false;
11463039bcc7SSean Christopherson
11473039bcc7SSean Christopherson /*
11483039bcc7SSean Christopherson * Note, when changing a read-only SPTE, it's not strictly necessary to
11493039bcc7SSean Christopherson * zero the SPTE before setting the new PFN, but doing so preserves the
11503039bcc7SSean Christopherson * invariant that the PFN of a present leaf SPTE can never change.
11513039bcc7SSean Christopherson * See __handle_changed_spte().
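 *
 * Going through a non-present intermediate state also means any concurrent
 * lockless walker (running under RCU, e.g. the fast page fault path) sees
 * either the old translation or a cleared SPTE, never a present SPTE whose
 * target page changed underneath it.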
11523039bcc7SSean Christopherson */
11533039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, 0);
11543039bcc7SSean Christopherson
11553039bcc7SSean Christopherson if (!pte_write(range->pte)) {
11563039bcc7SSean Christopherson new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
11573039bcc7SSean Christopherson pte_pfn(range->pte));
11583039bcc7SSean Christopherson
11593039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, new_spte);
11603039bcc7SSean Christopherson }
11613039bcc7SSean Christopherson
11623039bcc7SSean Christopherson return true;
1163f8e14497SBen Gardon }
11641d8dd6b3SBen Gardon
11651d8dd6b3SBen Gardon /*
11661d8dd6b3SBen Gardon * Handle the changed_pte MMU notifier for the TDP MMU.
11671d8dd6b3SBen Gardon * range->pte holds the new PTE mapping the HVA specified by the MMU
11681d8dd6b3SBen Gardon * notifier.
11691d8dd6b3SBen Gardon * Returns false; any needed TLB flush is done here (see the FIXME below).
11701d8dd6b3SBen Gardon */
11713039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
11721d8dd6b3SBen Gardon {
11733039bcc7SSean Christopherson bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
11741d8dd6b3SBen Gardon
11753039bcc7SSean Christopherson /* FIXME: return 'flush' instead of flushing here. */
11763039bcc7SSean Christopherson if (flush)
11773039bcc7SSean Christopherson kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
11787cca2d0bSBen Gardon
11793039bcc7SSean Christopherson return false;
11801d8dd6b3SBen Gardon }
11811d8dd6b3SBen Gardon
1182a6a0b05dSBen Gardon /*
1183bedd9195SDavid Matlack * Remove write access from all SPTEs at or above min_level that map GFNs
1184bedd9195SDavid Matlack * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1185bedd9195SDavid Matlack * be flushed.
1186a6a0b05dSBen Gardon */
1187a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1188a6a0b05dSBen Gardon gfn_t start, gfn_t end, int min_level)
1189a6a0b05dSBen Gardon {
1190a6a0b05dSBen Gardon struct tdp_iter iter;
1191a6a0b05dSBen Gardon u64 new_spte;
1192a6a0b05dSBen Gardon bool spte_set = false;
1193a6a0b05dSBen Gardon
11947cca2d0bSBen Gardon rcu_read_lock();
11957cca2d0bSBen Gardon
1196a6a0b05dSBen Gardon BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1197a6a0b05dSBen Gardon
1198a6a0b05dSBen Gardon for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1199a6a0b05dSBen Gardon min_level, start, end) {
120024ae4cfaSBen Gardon retry:
120124ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
12021af4a960SBen Gardon continue;
12031af4a960SBen Gardon
1204a6a0b05dSBen Gardon if (!is_shadow_present_pte(iter.old_spte) ||
12050f99ee2cSBen Gardon !is_last_spte(iter.old_spte, iter.level) ||
12060f99ee2cSBen Gardon !(iter.old_spte & PT_WRITABLE_MASK))
1207a6a0b05dSBen Gardon continue;
1208a6a0b05dSBen Gardon
1209a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1210a6a0b05dSBen Gardon
12116ccf4438SPaolo Bonzini if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
121224ae4cfaSBen Gardon /*
121324ae4cfaSBen Gardon * The iter must explicitly re-read the SPTE because
121424ae4cfaSBen Gardon * the atomic cmpxchg failed.
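 *
 * tdp_mmu_set_spte_atomic() cmpxchg()s the new value against
 * iter.old_spte, so a failure means another thread modified this SPTE
 * first; retrying with the stale value would spin forever, hence the
 * explicit re-read before the retry.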
121524ae4cfaSBen Gardon */ 121624ae4cfaSBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 121724ae4cfaSBen Gardon goto retry; 121824ae4cfaSBen Gardon } 1219a6a0b05dSBen Gardon spte_set = true; 1220a6a0b05dSBen Gardon } 12217cca2d0bSBen Gardon 12227cca2d0bSBen Gardon rcu_read_unlock(); 1223a6a0b05dSBen Gardon return spte_set; 1224a6a0b05dSBen Gardon } 1225a6a0b05dSBen Gardon 1226a6a0b05dSBen Gardon /* 1227a6a0b05dSBen Gardon * Remove write access from all the SPTEs mapping GFNs in the memslot. Will 1228a6a0b05dSBen Gardon * only affect leaf SPTEs down to min_level. 1229a6a0b05dSBen Gardon * Returns true if an SPTE has been changed and the TLBs need to be flushed. 1230a6a0b05dSBen Gardon */ 1231269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, 1232269e9552SHamza Mahfooz const struct kvm_memory_slot *slot, int min_level) 1233a6a0b05dSBen Gardon { 1234a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1235a6a0b05dSBen Gardon bool spte_set = false; 1236a6a0b05dSBen Gardon 123724ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1238a6a0b05dSBen Gardon 1239d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1240a6a0b05dSBen Gardon spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, 1241a6a0b05dSBen Gardon slot->base_gfn + slot->npages, min_level); 1242a6a0b05dSBen Gardon 1243a6a0b05dSBen Gardon return spte_set; 1244a6a0b05dSBen Gardon } 1245a6a0b05dSBen Gardon 1246a6a0b05dSBen Gardon /* 1247a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1248a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1249a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1250a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1251a6a0b05dSBen Gardon * be flushed. 1252a6a0b05dSBen Gardon */ 1253a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1254a6a0b05dSBen Gardon gfn_t start, gfn_t end) 1255a6a0b05dSBen Gardon { 1256a6a0b05dSBen Gardon struct tdp_iter iter; 1257a6a0b05dSBen Gardon u64 new_spte; 1258a6a0b05dSBen Gardon bool spte_set = false; 1259a6a0b05dSBen Gardon 12607cca2d0bSBen Gardon rcu_read_lock(); 12617cca2d0bSBen Gardon 1262a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, start, end) { 126324ae4cfaSBen Gardon retry: 126424ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 12651af4a960SBen Gardon continue; 12661af4a960SBen Gardon 1267a6a0b05dSBen Gardon if (spte_ad_need_write_protect(iter.old_spte)) { 1268a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1269a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1270a6a0b05dSBen Gardon else 1271a6a0b05dSBen Gardon continue; 1272a6a0b05dSBen Gardon } else { 1273a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1274a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1275a6a0b05dSBen Gardon else 1276a6a0b05dSBen Gardon continue; 1277a6a0b05dSBen Gardon } 1278a6a0b05dSBen Gardon 12796ccf4438SPaolo Bonzini if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) { 128024ae4cfaSBen Gardon /* 128124ae4cfaSBen Gardon * The iter must explicitly re-read the SPTE because 128224ae4cfaSBen Gardon * the atomic cmpxchg failed. 
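 *
 * The failure is benign: with the refreshed value the loop re-evaluates
 * the SPTE from scratch, and if the other thread already cleared or
 * zapped it there is simply nothing left to do for this entry.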
128324ae4cfaSBen Gardon */ 128424ae4cfaSBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 128524ae4cfaSBen Gardon goto retry; 128624ae4cfaSBen Gardon } 1287a6a0b05dSBen Gardon spte_set = true; 1288a6a0b05dSBen Gardon } 12897cca2d0bSBen Gardon 12907cca2d0bSBen Gardon rcu_read_unlock(); 1291a6a0b05dSBen Gardon return spte_set; 1292a6a0b05dSBen Gardon } 1293a6a0b05dSBen Gardon 1294a6a0b05dSBen Gardon /* 1295a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1296a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1297a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1298a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1299a6a0b05dSBen Gardon * be flushed. 1300a6a0b05dSBen Gardon */ 1301269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, 1302269e9552SHamza Mahfooz const struct kvm_memory_slot *slot) 1303a6a0b05dSBen Gardon { 1304a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1305a6a0b05dSBen Gardon bool spte_set = false; 1306a6a0b05dSBen Gardon 130724ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1308a6a0b05dSBen Gardon 1309d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1310a6a0b05dSBen Gardon spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, 1311a6a0b05dSBen Gardon slot->base_gfn + slot->npages); 1312a6a0b05dSBen Gardon 1313a6a0b05dSBen Gardon return spte_set; 1314a6a0b05dSBen Gardon } 1315a6a0b05dSBen Gardon 1316a6a0b05dSBen Gardon /* 1317a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1318a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1319a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1320a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1321a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
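 *
 * Bit N of mask corresponds to the 4k page at gfn + N, so a single call
 * covers at most BITS_PER_LONG GFNs.  For example (illustrative values
 * only): gfn == 0x1000 and mask == 0x5 would clear the dirty state of the
 * SPTEs mapping GFNs 0x1000 and 0x1002.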
1322a6a0b05dSBen Gardon */ 1323a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, 1324a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, bool wrprot) 1325a6a0b05dSBen Gardon { 1326a6a0b05dSBen Gardon struct tdp_iter iter; 1327a6a0b05dSBen Gardon u64 new_spte; 1328a6a0b05dSBen Gardon 13297cca2d0bSBen Gardon rcu_read_lock(); 13307cca2d0bSBen Gardon 1331a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), 1332a6a0b05dSBen Gardon gfn + BITS_PER_LONG) { 1333a6a0b05dSBen Gardon if (!mask) 1334a6a0b05dSBen Gardon break; 1335a6a0b05dSBen Gardon 1336a6a0b05dSBen Gardon if (iter.level > PG_LEVEL_4K || 1337a6a0b05dSBen Gardon !(mask & (1UL << (iter.gfn - gfn)))) 1338a6a0b05dSBen Gardon continue; 1339a6a0b05dSBen Gardon 1340f1b3b06aSBen Gardon mask &= ~(1UL << (iter.gfn - gfn)); 1341f1b3b06aSBen Gardon 1342a6a0b05dSBen Gardon if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { 1343a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1344a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1345a6a0b05dSBen Gardon else 1346a6a0b05dSBen Gardon continue; 1347a6a0b05dSBen Gardon } else { 1348a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1349a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1350a6a0b05dSBen Gardon else 1351a6a0b05dSBen Gardon continue; 1352a6a0b05dSBen Gardon } 1353a6a0b05dSBen Gardon 1354a6a0b05dSBen Gardon tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); 1355a6a0b05dSBen Gardon } 13567cca2d0bSBen Gardon 13577cca2d0bSBen Gardon rcu_read_unlock(); 1358a6a0b05dSBen Gardon } 1359a6a0b05dSBen Gardon 1360a6a0b05dSBen Gardon /* 1361a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1362a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1363a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1364a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1365a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 1366a6a0b05dSBen Gardon */ 1367a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, 1368a6a0b05dSBen Gardon struct kvm_memory_slot *slot, 1369a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, 1370a6a0b05dSBen Gardon bool wrprot) 1371a6a0b05dSBen Gardon { 1372a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1373a6a0b05dSBen Gardon 1374531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1375a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 1376a6a0b05dSBen Gardon clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); 1377a6a0b05dSBen Gardon } 1378a6a0b05dSBen Gardon 1379a6a0b05dSBen Gardon /* 138087aa9ec9SBen Gardon * Clear leaf entries which could be replaced by large mappings, for 138187aa9ec9SBen Gardon * GFNs within the slot. 
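 *
 * Only last-level SPTEs whose backing pfn could, according to
 * kvm_mmu_max_mapping_level(), be covered by a larger mapping are zapped;
 * the huge mapping itself is not installed here, it is recreated lazily
 * by the next fault on the range.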
138214881998SBen Gardon */ 13834b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm, 138414881998SBen Gardon struct kvm_mmu_page *root, 13854b85c921SSean Christopherson const struct kvm_memory_slot *slot) 138614881998SBen Gardon { 13879eba50f8SSean Christopherson gfn_t start = slot->base_gfn; 13889eba50f8SSean Christopherson gfn_t end = start + slot->npages; 138914881998SBen Gardon struct tdp_iter iter; 139014881998SBen Gardon kvm_pfn_t pfn; 139114881998SBen Gardon 13927cca2d0bSBen Gardon rcu_read_lock(); 13937cca2d0bSBen Gardon 139414881998SBen Gardon tdp_root_for_each_pte(iter, root, start, end) { 13952db6f772SBen Gardon retry: 13964b85c921SSean Christopherson if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 13971af4a960SBen Gardon continue; 13981af4a960SBen Gardon 139914881998SBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 140087aa9ec9SBen Gardon !is_last_spte(iter.old_spte, iter.level)) 140114881998SBen Gardon continue; 140214881998SBen Gardon 140314881998SBen Gardon pfn = spte_to_pfn(iter.old_spte); 140414881998SBen Gardon if (kvm_is_reserved_pfn(pfn) || 14059eba50f8SSean Christopherson iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, 14069eba50f8SSean Christopherson pfn, PG_LEVEL_NUM)) 140714881998SBen Gardon continue; 140814881998SBen Gardon 14094b85c921SSean Christopherson /* Note, a successful atomic zap also does a remote TLB flush. */ 14102db6f772SBen Gardon if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { 14112db6f772SBen Gardon /* 14122db6f772SBen Gardon * The iter must explicitly re-read the SPTE because 14132db6f772SBen Gardon * the atomic cmpxchg failed. 14142db6f772SBen Gardon */ 14152db6f772SBen Gardon iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); 14162db6f772SBen Gardon goto retry; 14172db6f772SBen Gardon } 141814881998SBen Gardon } 141914881998SBen Gardon 14207cca2d0bSBen Gardon rcu_read_unlock(); 142114881998SBen Gardon } 142214881998SBen Gardon 142314881998SBen Gardon /* 142414881998SBen Gardon * Clear non-leaf entries (and free associated page tables) which could 142514881998SBen Gardon * be replaced by large mappings, for GFNs within the slot. 142614881998SBen Gardon */ 14274b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, 14284b85c921SSean Christopherson const struct kvm_memory_slot *slot) 142914881998SBen Gardon { 143014881998SBen Gardon struct kvm_mmu_page *root; 143114881998SBen Gardon 14322db6f772SBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 143314881998SBen Gardon 1434d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 14354b85c921SSean Christopherson zap_collapsible_spte_range(kvm, root, slot); 143614881998SBen Gardon } 143746044f72SBen Gardon 143846044f72SBen Gardon /* 143946044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 14405fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 144146044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 
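 *
 * Clearing shadow_mmu_writable_mask in addition to PT_WRITABLE_MASK is
 * what should keep the fast page fault path from locklessly restoring
 * write access, which ordinary dirty-log write protection would allow.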
144246044f72SBen Gardon */ 144346044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, 14443ad93562SKeqian Zhu gfn_t gfn, int min_level) 144546044f72SBen Gardon { 144646044f72SBen Gardon struct tdp_iter iter; 144746044f72SBen Gardon u64 new_spte; 144846044f72SBen Gardon bool spte_set = false; 144946044f72SBen Gardon 14503ad93562SKeqian Zhu BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 14513ad93562SKeqian Zhu 14527cca2d0bSBen Gardon rcu_read_lock(); 14537cca2d0bSBen Gardon 14543ad93562SKeqian Zhu for_each_tdp_pte_min_level(iter, root->spt, root->role.level, 14553ad93562SKeqian Zhu min_level, gfn, gfn + 1) { 14563ad93562SKeqian Zhu if (!is_shadow_present_pte(iter.old_spte) || 14573ad93562SKeqian Zhu !is_last_spte(iter.old_spte, iter.level)) 14583ad93562SKeqian Zhu continue; 14593ad93562SKeqian Zhu 146046044f72SBen Gardon new_spte = iter.old_spte & 14615fc3424fSSean Christopherson ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); 146246044f72SBen Gardon 14637c8a4742SDavid Matlack if (new_spte == iter.old_spte) 14647c8a4742SDavid Matlack break; 14657c8a4742SDavid Matlack 146646044f72SBen Gardon tdp_mmu_set_spte(kvm, &iter, new_spte); 146746044f72SBen Gardon spte_set = true; 146846044f72SBen Gardon } 146946044f72SBen Gardon 14707cca2d0bSBen Gardon rcu_read_unlock(); 14717cca2d0bSBen Gardon 147246044f72SBen Gardon return spte_set; 147346044f72SBen Gardon } 147446044f72SBen Gardon 147546044f72SBen Gardon /* 147646044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 14775fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 147846044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 147946044f72SBen Gardon */ 148046044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, 14813ad93562SKeqian Zhu struct kvm_memory_slot *slot, gfn_t gfn, 14823ad93562SKeqian Zhu int min_level) 148346044f72SBen Gardon { 148446044f72SBen Gardon struct kvm_mmu_page *root; 148546044f72SBen Gardon bool spte_set = false; 148646044f72SBen Gardon 1487531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1488a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 14893ad93562SKeqian Zhu spte_set |= write_protect_gfn(kvm, root, gfn, min_level); 1490a3f15bdaSSean Christopherson 149146044f72SBen Gardon return spte_set; 149246044f72SBen Gardon } 149346044f72SBen Gardon 149495fb5b02SBen Gardon /* 149595fb5b02SBen Gardon * Return the level of the lowest level SPTE added to sptes. 149695fb5b02SBen Gardon * That SPTE may be non-present. 1497c5c8c7c5SDavid Matlack * 1498c5c8c7c5SDavid Matlack * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}. 
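 *
 * sptes[] is indexed by level: sptes[iter.level] is written for every
 * level the walk reaches, entries for unreached levels are left untouched,
 * and a return value of -1 means no walk was performed at all.  The MMIO
 * caching path (presumably get_mmio_spte() in mmu.c) is the expected
 * caller.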
149995fb5b02SBen Gardon */
150039b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
150139b4d43eSSean Christopherson int *root_level)
150295fb5b02SBen Gardon {
150395fb5b02SBen Gardon struct tdp_iter iter;
150495fb5b02SBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu;
150595fb5b02SBen Gardon gfn_t gfn = addr >> PAGE_SHIFT;
15062aa07893SSean Christopherson int leaf = -1;
150795fb5b02SBen Gardon
150839b4d43eSSean Christopherson *root_level = vcpu->arch.mmu->shadow_root_level;
150995fb5b02SBen Gardon
151095fb5b02SBen Gardon tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
151195fb5b02SBen Gardon leaf = iter.level;
1512dde81f94SSean Christopherson sptes[leaf] = iter.old_spte;
151395fb5b02SBen Gardon }
151495fb5b02SBen Gardon
151595fb5b02SBen Gardon return leaf;
151695fb5b02SBen Gardon }
15176e8eb206SDavid Matlack
15186e8eb206SDavid Matlack /*
15196e8eb206SDavid Matlack * Returns the last level spte pointer of the shadow page walk for the given
15206e8eb206SDavid Matlack * gpa, and sets *spte to the spte value. This spte may be non-present. If no
15216e8eb206SDavid Matlack * walk could be performed, returns NULL and *spte does not contain valid data.
15226e8eb206SDavid Matlack *
15236e8eb206SDavid Matlack * Contract:
15246e8eb206SDavid Matlack * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
15256e8eb206SDavid Matlack * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
15266e8eb206SDavid Matlack *
15276e8eb206SDavid Matlack * WARNING: This function is only intended to be called during fast_page_fault.
15286e8eb206SDavid Matlack */
15296e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
15306e8eb206SDavid Matlack u64 *spte)
15316e8eb206SDavid Matlack {
15326e8eb206SDavid Matlack struct tdp_iter iter;
15336e8eb206SDavid Matlack struct kvm_mmu *mmu = vcpu->arch.mmu;
15346e8eb206SDavid Matlack gfn_t gfn = addr >> PAGE_SHIFT;
15356e8eb206SDavid Matlack tdp_ptep_t sptep = NULL;
15366e8eb206SDavid Matlack
15376e8eb206SDavid Matlack tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
15386e8eb206SDavid Matlack *spte = iter.old_spte;
15396e8eb206SDavid Matlack sptep = iter.sptep;
15406e8eb206SDavid Matlack }
15416e8eb206SDavid Matlack
15426e8eb206SDavid Matlack /*
15436e8eb206SDavid Matlack * Perform the rcu_dereference to get the raw spte pointer value since
15446e8eb206SDavid Matlack * we are passing it up to fast_page_fault, which is shared with the
15456e8eb206SDavid Matlack * legacy MMU and thus does not retain the TDP MMU-specific __rcu
15466e8eb206SDavid Matlack * annotation.
15476e8eb206SDavid Matlack *
15486e8eb206SDavid Matlack * This is safe since fast_page_fault obeys the contracts of this
15496e8eb206SDavid Matlack * function as well as all TDP MMU contracts around modifying SPTEs
15506e8eb206SDavid Matlack * outside of mmu_lock.
15516e8eb206SDavid Matlack */
15526e8eb206SDavid Matlack return rcu_dereference(sptep);
15536e8eb206SDavid Matlack }
1554
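/*
 * Illustrative sketch only, not upstream code: one way a caller could honor
 * the lockless-walk contract documented above.  The helper name
 * example_read_last_spte() is an assumption made for the example, and the
 * bare kvm_tdp_mmu_walk_lockless_begin()/end() calls stand in for the
 * begin/end pair named in the contract; in-tree, fast_page_fault() is
 * expected to reach this function via walk_shadow_page_lockless_begin()
 * and walk_shadow_page_lockless_end() in mmu.c.
 */
#if 0
static u64 example_read_last_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 spte = 0;
	u64 *sptep;

	/* Enter the RCU-protected walk before touching any SPTE pointers. */
	kvm_tdp_mmu_walk_lockless_begin();

	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);

	/*
	 * sptep must not be dereferenced once the walk has ended; spte is a
	 * plain copy and remains valid.
	 */
	kvm_tdp_mmu_walk_lockless_end();

	return sptep ? spte : 0;
}
#endif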