// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <trace/events/kvm.h>

#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
#endif

static bool is_tdp_mmu_enabled(void)
{
#ifdef CONFIG_X86_64
	return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
#else
	return false;
#endif /* CONFIG_X86_64 */
}

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!is_tdp_mmu_enabled())
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}

static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (kvm_mmu_put_root(kvm, root))
		kvm_tdp_mmu_free_root(kvm, root);
}

static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held(&kvm->mmu_lock);

	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	kvm_mmu_get_root(kvm, root);
	return true;
}

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	tdp_mmu_put_root(kvm, root);
	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))

#define for_each_tdp_mmu_root(_kvm, _root)				\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
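
/*
 * Illustrative sketch (not compiled): a typical caller walks the roots with
 * the _yield_safe variant while holding the MMU lock, e.g.
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root)
 *		flush |= zap_gfn_range(kvm, root, start, end, true);
 *
 * as kvm_tdp_mmu_zap_gfn_range() does below. The iterator takes a reference
 * on each root in tdp_mmu_next_root_valid() and drops it in
 * tdp_mmu_next_root(), so a caller that breaks out of the loop early still
 * holds a reference to the current root and is responsible for dropping it.
 */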

bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!kvm->arch.tdp_mmu_enabled)
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return sp->tdp_mmu_page && sp->root_count;
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield);

void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	lockdep_assert_held(&kvm->mmu_lock);

	WARN_ON(root->root_count);
	WARN_ON(!root->tdp_mmu_page);

	list_del(&root->link);

	zap_gfn_range(kvm, root, 0, max_gfn, false);

	free_page((unsigned long)root->spt);
	kmem_cache_free(mmu_page_header_cache, root);
}

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	spin_lock(&kvm->mmu_lock);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root) {
		if (root->role.word == role.word) {
			kvm_mmu_get_root(kvm, root);
			spin_unlock(&kvm->mmu_lock);
			return root;
		}
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	root->root_count = 1;

	list_add(&root->link, &kvm->arch.tdp_mmu_roots);

	spin_unlock(&kvm->mmu_lock);

	return root;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *root;

	root = get_tdp_mmu_vcpu_root(vcpu);
	if (!root)
		return INVALID_PAGE;

	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level);

static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return sp->role.smm ? 1 : 0;
}

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_accessed_spte(new_spte) || pfn_changed))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
	u64 *pt;
	struct kvm_mmu_page *sp;
	u64 old_child_spte;
	int i;

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE, it is
		 * unexpected. Log the change, though it should not impact the
		 * guest since both the former and current SPTEs are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present)) {
		pt = spte_to_child_pt(old_spte, level);
		sp = sptep_to_sp(pt);

		trace_kvm_mmu_prepare_zap_page(sp);

		list_del(&sp->link);

		if (sp->lpage_disallowed)
			unaccount_huge_nx_page(kvm, sp);

		for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
			old_child_spte = READ_ONCE(*(pt + i));
			WRITE_ONCE(*(pt + i), 0);
			handle_changed_spte(kvm, as_id,
				gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
				old_child_spte, 0, level - 1);
		}

		kvm_flush_remote_tlbs_with_address(kvm, gfn,
						   KVM_PAGES_PER_HPAGE(level));

		free_page((unsigned long)pt);
		kmem_cache_free(mmu_page_header_cache, sp);
	}
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	u64 *root_pt = tdp_iter_root_pt(iter);
	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
	int as_id = kvm_mmu_page_as_id(root);

	WRITE_ONCE(*iter->sptep, new_spte);

	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
			      iter->level);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}
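
/*
 * A rough guide to the wrappers above, based on their callers in this file:
 * paths that intentionally clear the accessed state (e.g. age_gfn_range())
 * use tdp_mmu_set_spte_no_acc_track() so that the aging is not itself
 * reported as an access, paths that implement dirty logging (e.g.
 * wrprot_gfn_range() and clear_dirty_gfn_range()) use
 * tdp_mmu_set_spte_no_dirty_log() so that write-protecting or clearing the
 * dirty bit is not itself recorded as dirtying the page, and everything else
 * uses plain tdp_mmu_set_spte().
 */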

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)
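
/*
 * Illustrative sketch (not compiled): a walk over only the present leaf SPTEs
 * of a root, under the MMU lock, has the shape
 *
 *	tdp_root_for_each_leaf_pte(iter, root, start, end) {
 *		if (!is_writable_pte(iter.old_spte))
 *			continue;
 *		tdp_mmu_set_spte(kvm, &iter, iter.old_spte & ~PT_WRITABLE_MASK);
 *	}
 *
 * which is essentially the pattern used by write_protect_gfn() and
 * clear_dirty_gfn_range() later in this file.
 */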

/*
 * Flush the TLB and yield if the MMU lock is contended or this thread needs to
 * return control to the scheduler.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should allow the iterator to
 * continue its traversal from the paging structure root.
 *
 * Return true if this function yielded, the TLBs were flushed, and the
 * iterator's traversal was reset. Return false if a yield was not needed.
 */
static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
		kvm_flush_remote_tlbs(kvm);
		cond_resched_lock(&kvm->mmu_lock);
		tdp_iter_refresh_walk(iter);
		return true;
	}

	return false;
}

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should allow the iterator to
 * continue its traversal from the paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
		cond_resched_lock(&kvm->mmu_lock);
		tdp_iter_refresh_walk(iter);
		return true;
	}

	return false;
}
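
/*
 * Of the two helpers above, the flushing variant is used by walks that have
 * already zapped SPTEs and therefore must not drop the MMU lock with a TLB
 * flush still owed (e.g. zap_gfn_range() and zap_collapsible_spte_range()),
 * while the plain variant is used by walks such as wrprot_gfn_range() and
 * clear_dirty_gfn_range(), which instead report whether any SPTE changed so
 * that their caller can do a single flush once the whole walk is done.
 */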

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield)
{
	struct tdp_iter iter;
	bool flush_needed = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);

		flush_needed = !can_yield ||
			       !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
	}
	return flush_needed;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	struct kvm_mmu_page *root;
	bool flush = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root)
		flush |= zap_gfn_range(kvm, root, start, end, true);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush;

	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					   int map_writable,
					   struct tdp_iter *iter,
					   kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = 0;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn))) {
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
		trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
	} else {
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					  pfn, iter->old_spte, prefault, true,
					  map_writable, !shadow_accessed_mask,
					  &new_spte);
		trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
	}

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else
		tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte)))
		ret = RET_PF_EMULATE;

	trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			tdp_mmu_set_spte(vcpu->kvm, &iter, 0);

			kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
					KVM_PAGES_PER_HPAGE(iter.level));

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*iter.sptep);
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
			child_pt = sp->spt;
			clear_page(child_pt);
			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			trace_kvm_mmu_get_page(sp, true);
			if (huge_page_disallowed && req_level >= iter.level)
				account_huge_nx_page(vcpu->kvm, sp);

			tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
		}
	}

	if (WARN_ON(iter.level != level))
		return RET_PF_RETRY;

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);

	return ret;
}
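
/*
 * A condensed view of kvm_tdp_mmu_map() above (description only): starting
 * from the root, the walk either stops once it reaches the target level,
 * zaps a large SPTE that overlaps the mapping being installed, or links in a
 * freshly allocated page table, and finally hands the last-level SPTE to
 * tdp_mmu_map_handle_target_level() to be filled in.
 */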

static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
		unsigned long end, unsigned long data,
		int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
			       struct kvm_mmu_page *root, gfn_t start,
			       gfn_t end, unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_mmu_page *root;
	int ret = 0;
	int as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		as_id = kvm_mmu_page_as_id(root);
		slots = __kvm_memslots(kvm, as_id);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			ret |= handler(kvm, memslot, root, gfn_start,
				       gfn_end, data);
		}
	}

	return ret;
}

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     struct kvm_mmu_page *root, gfn_t start,
				     gfn_t end, unsigned long unused)
{
	return zap_gfn_range(kvm, root, start, end, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    zap_gfn_range_hva_wrapper);
}
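
/*
 * Illustrative sketch (not compiled): wiring a new MMU notifier operation
 * through kvm_tdp_mmu_handle_hva_range() only needs a handler with the
 * callback signature above, e.g.
 *
 *	static int my_handler(struct kvm *kvm, struct kvm_memory_slot *slot,
 *			      struct kvm_mmu_page *root, gfn_t start,
 *			      gfn_t end, unsigned long data);
 *
 *	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, my_handler);
 *
 * my_handler is a hypothetical name; the aging and changed-pte handlers
 * below follow exactly this pattern.
 */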

/*
 * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return
 * non-zero if any of the GFNs in the range have been accessed.
 */
static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
			 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
			 unsigned long unused)
{
	struct tdp_iter iter;
	int young = 0;
	u64 new_spte = 0;

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		/*
		 * If we have a non-accessed entry we don't need to change the
		 * pte.
		 */
		if (!is_accessed_spte(iter.old_spte))
			continue;

		new_spte = iter.old_spte;

		if (spte_ad_enabled(new_spte)) {
			clear_bit((ffs(shadow_accessed_mask) - 1),
				  (unsigned long *)&new_spte);
		} else {
			/*
			 * Capture the dirty status of the page, so that it
			 * doesn't get lost when the SPTE is marked for access
			 * tracking.
			 */
			if (is_writable_pte(new_spte))
				kvm_set_pfn_dirty(spte_to_pfn(new_spte));

			new_spte = mark_spte_for_access_track(new_spte);
		}
		new_spte &= ~shadow_dirty_mask;

		tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
		young = 1;

		trace_kvm_age_page(iter.gfn, iter.level, slot, young);
	}

	return young;
}

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    age_gfn_range);
}

static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long unused2)
{
	struct tdp_iter iter;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
		if (is_accessed_spte(iter.old_spte))
			return 1;

	return 0;
}

int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
					    test_age_gfn);
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long data)
{
	struct tdp_iter iter;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;
	u64 new_spte;
	int need_flush = 0;

	WARN_ON(pte_huge(*ptep));

	new_pfn = pte_pfn(*ptep);

	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
		if (iter.level != PG_LEVEL_4K)
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			break;

		tdp_mmu_set_spte(kvm, &iter, 0);

		kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

		if (!pte_write(*ptep)) {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					iter.old_spte, new_pfn);

			tdp_mmu_set_spte(kvm, &iter, new_spte);
		}

		need_flush = 1;
	}

	if (need_flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

	return 0;
}

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
					    (unsigned long)host_ptep,
					    set_tdp_spte);
}

/*
 * Remove write access from all of the leaf SPTEs that map GFNs in the range
 * [start, end), down to (and including) min_level. Leaf SPTEs at levels
 * below min_level are not write-protected.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;

		tdp_mmu_iter_cond_resched(kvm, &iter);
	}
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);
	}

	return spte_set;
}
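
/*
 * Illustrative sketch (not compiled), assuming a caller in the common MMU
 * code that has just enabled dirty logging on a memslot:
 *
 *	if (kvm->arch.tdp_mmu_enabled)
 *		flush |= kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 *
 * Passing PG_LEVEL_4K as min_level write-protects every leaf mapping; a
 * larger min_level leaves leaf mappings below that level writable.
 */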

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;

		tdp_mmu_iter_cond_resched(kvm, &iter);
	}
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
	}

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				    gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);

		mask &= ~(1UL << (iter.gfn - gfn));
	}
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
	}
}
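
/*
 * Worked example (illustrative): with gfn == 0x1000 and mask == 0x9, bits 0
 * and 3 are set, so only the 4k SPTEs mapping GFNs 0x1000 and 0x1003 are
 * touched. With wrprot == false and A/D bits in use their dirty bits are
 * cleared; with wrprot == true (or when the SPTE needs write protection for
 * dirty tracking) they are write-protected instead. Each handled bit is
 * cleared from the local copy of mask, so the walk stops as soon as mask
 * reaches zero.
 */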

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		new_spte = iter.old_spte | shadow_dirty_mask;

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;

		tdp_mmu_iter_cond_resched(kvm, &iter);
	}

	return spte_set;
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
	}
	return spte_set;
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	kvm_pfn_t pfn;
	bool spte_set = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    !PageTransCompoundMap(pfn_to_page(pfn)))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);

		spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
	}

	if (spte_set)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		zap_collapsible_spte_range(kvm, root, slot->base_gfn,
					   slot->base_gfn + slot->npages);
	}
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}
	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}
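
/*
 * Illustrative sketch (not compiled), loosely following how a caller in the
 * common MMU code could consume this walk when probing for an MMIO SPTE:
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf, level;
 *
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	if (leaf < 0)
 *		return;
 *	for (level = root_level; level >= leaf; level--)
 *		pr_debug("level %d spte 0x%llx\n", level, sptes[level]);
 *
 * The sptes array is indexed by raw level, so only the entries from the
 * returned leaf level up to *root_level are filled in; a negative return
 * value means the walk recorded no SPTEs.
 */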