// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

/* Initializes the TDP MMU for the VM, if enabled. */
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
	if (!wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	kvm->arch.tdp_mmu_zap_wq = wq;
	return 1;
}
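/*
 * Illustrative sketch, not part of this file: the VM-init path in mmu.c is
 * expected to treat a negative return as fatal and any non-negative value
 * as success, roughly:
 *
 *	r = kvm_mmu_init_tdp_mmu(kvm);
 *	if (r < 0)
 *		return r;
 *
 * The positive return (1) merely signals that the TDP MMU is in use.
 */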
/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	/* Also waits for any queued work items. */
	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);

	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
	 * can call kvm_tdp_mmu_put_root and create new callbacks.
	 */
	rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared);

static void tdp_mmu_zap_root_work(struct work_struct *work)
{
	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
						 tdp_mmu_async_work);
	struct kvm *kvm = root->tdp_mmu_async_data;

	read_lock(&kvm->mmu_lock);

	/*
	 * A TLB flush is not necessary as KVM performs a local TLB flush when
	 * allocating a new root (see kvm_mmu_load()), and when migrating a
	 * vCPU to a different pCPU.  Note, the local TLB flush on reuse also
	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
	 * intermediate paging structures, that may be zapped, as such entries
	 * are associated with the ASID on both VMX and SVM.
	 */
	tdp_mmu_zap_root(kvm, root, true);

	/*
	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
	 * avoiding an infinite loop.  By design, the root is reachable while
	 * it's being asynchronously zapped, thus a different task can put its
	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
	 * asynchronously zapped root is unavoidable.
	 */
	kvm_tdp_mmu_put_root(kvm, root, true);

	read_unlock(&kvm->mmu_lock);
}

static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	root->tdp_mmu_async_data = kvm;
	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}
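/*
 * Sketch of a queued root's lifecycle, pulling together the pieces above and
 * kvm_tdp_mmu_put_root() below:
 *
 *	kvm_tdp_mmu_put_root()               refcount 1 -> 0
 *	  -> mark root invalid, refcount = 1 (root stays reachable)
 *	  -> tdp_mmu_schedule_zap_root()
 *	... later, in tdp_mmu_zap_root_work():
 *	  -> tdp_mmu_zap_root()              may yield under mmu_lock (read)
 *	  -> kvm_tdp_mmu_put_root()          drops the reference gifted above
 */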
static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
{
	union kvm_mmu_page_role role = page->role;
	role.invalid = true;

	/* No need to use cmpxchg, only the invalid bit can change. */
	role.word = xchg(&page->role.word, role.word);
	return role.invalid;
}
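/*
 * The xchg() above makes marking a root invalid idempotent and race-free:
 * it is logically the sequence
 *
 *	old = page->role;
 *	page->role.invalid = true;
 *	return old.invalid;
 *
 * performed atomically, so exactly one caller observes "false" and thus
 * becomes responsible for the root's invalid -> freed transition.
 */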
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!is_tdp_mmu_page(root));

	/*
	 * The root now has refcount=0.  It is valid, but readers already
	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
	 * rejects it.  This remains true for the rest of the execution
	 * of this function, because readers visit valid roots only
	 * (except for tdp_mmu_zap_root_work(), which however
	 * does not acquire any reference itself).
	 *
	 * Even though there are flows that need to visit all roots for
	 * correctness, they all take mmu_lock for write, so they cannot yet
	 * run concurrently.  The same is true after kvm_tdp_root_mark_invalid,
	 * since the root still has refcount=0.
	 *
	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
	 * So the root temporarily gets an extra reference, going to refcount=1
	 * while staying invalid.  Readers still cannot acquire any reference;
	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
	 * they might take an extra reference if they themselves yield.
	 * Therefore, when the reference is given back by the worker,
	 * there is no guarantee that the refcount is still 1.  If not, whoever
	 * puts the last reference will free the page, but they will not have
	 * to zap the root because a root cannot go from invalid to valid.
	 */
	if (!kvm_tdp_root_mark_invalid(root)) {
		refcount_set(&root->tdp_mmu_root_count, 1);

		/*
		 * Zapping the root in a worker is not just "nice to have";
		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
		 * might return with some roots not zapped yet.
		 */
		tdp_mmu_schedule_zap_root(kvm, root);
		return;
	}

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL).  A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
		    kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)

/*
 * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
		    kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else
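/*
 * Illustrative usage of the iterators above; the loop bodies are
 * placeholders, but the patterns mirror the real callers later in this file:
 *
 *	// Yield-friendly walk, e.g. with mmu_lock held for read:
 *	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id, true)
 *		flush = do_something_that_may_yield(kvm, root, flush);
 *
 *	// Simple walk, mmu_lock held for write, no yielding:
 *	for_each_tdp_mmu_root(kvm, root, as_id)
 *		do_something(kvm, root);
 *
 * Breaking out of a yield-safe walk early requires an explicit
 * kvm_tdp_mmu_put_root() on the current root.
 */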
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * Check for an existing root before allocating a new one.  Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}
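/*
 * Hedged sketch of the consumer, which lives in mmu.c rather than here: the
 * direct-root allocation path is expected to stash the returned HPA roughly
 * as
 *
 *	root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
 *	mmu->root.hpa = root;
 *
 * which is why tdp_mmu_for_each_pte() below can recover the root shadow page
 * via to_shadow_page(_mmu->root.hpa).
 */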
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, +1);
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared)
{
	tdp_unaccount_mmu_page(kvm, sp);

	if (!sp->nx_huge_page_disallowed)
		return;

	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp, shared);

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		tdp_ptep_t sptep = pt + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * keep setting the SPTE until it is successfully
			 * changed from some other value to the removed
			 * SPTE value.
			 */
			for (;;) {
				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_spte = kvm_tdp_mmu_read_spte(sptep);
			if (!is_shadow_present_pte(old_spte))
				continue;

			/*
			 * Use the common helper instead of a raw WRITE_ONCE as
			 * the SPTE needs to be updated atomically if it can be
			 * modified by a different vCPU outside of mmu_lock.
			 * Even though the parent SPTE is !PRESENT, the TLB
			 * hasn't yet been flushed, and both Intel and AMD
			 * document that A/D assists can use upper-level PxE
			 * entries that are cached in the TLB, i.e. the CPU can
			 * still access the page and mark it dirty.
			 *
			 * No retry is needed in the atomic update path as the
			 * sole concern is dropping a Dirty bit, i.e. no other
			 * task can zap/remove the SPTE as mmu_lock is held for
			 * write.  Marking the SPTE as a removed SPTE is not
			 * strictly necessary for the same reason, but using
			 * the removed SPTE value keeps the shared/exclusive
			 * paths consistent and allows the handle_changed_spte()
			 * call below to hardcode the new value to REMOVED_SPTE.
			 *
			 * Note, even though dropping a Dirty bit is the only
			 * scenario where a non-atomic update could result in a
			 * functional bug, simply checking the Dirty bit isn't
			 * sufficient as a fast page fault could read the upper
			 * level SPTE before it is zapped, and then make this
			 * target SPTE writable, resume the guest, and set the
			 * Dirty bit between reading the SPTE above and writing
			 * it here.
			 */
			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
							  REMOVED_SPTE, level);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_spte, REMOVED_SPTE, level, shared);
	}

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
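/*
 * Note the mutual recursion: handle_removed_pt() invokes handle_changed_spte()
 * for each zapped child SPTE, and __handle_changed_spte() below calls back
 * into handle_removed_pt() whenever the old SPTE pointed to another page
 * table.  The recursion depth is bounded by the height of the paging
 * structure (root level down to PG_LEVEL_4K).
 */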
/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.  Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}
/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter,
					  u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);

	/*
	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
		return -EBUSY;

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return 0;
}

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter)
{
	int ret;

	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
	if (ret)
		return ret;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they must either
	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
	 * overwrite the special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present to non-present.  Use
	 * the raw write helper to avoid an unnecessary check on volatile bits.
	 */
	__kvm_tdp_mmu_write_spte(iter->sptep, 0);

	return 0;
}
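/*
 * The canonical retry pattern for tdp_mmu_set_spte_atomic(), as used by the
 * zap paths later in this file: on -EBUSY, iter->old_spte has been refreshed
 * to the current value, so the caller simply retries:
 *
 *	retry:
 *		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *			goto retry;
 */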
/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:	      KVM instance
 * @as_id:	      Address space ID, i.e. regular vs. SMM
 * @sptep:	      Pointer to the SPTE
 * @old_spte:	      The current value of the SPTE
 * @new_spte:	      The new value that will be set for the SPTE
 * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
 * @level:	      The level _containing_ the SPTE (its parent PT's level)
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 *
 * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits.
 */
static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			      u64 old_spte, u64 new_spte, gfn_t gfn, int level,
			      bool record_acc_track, bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));

	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

	if (record_acc_track)
		handle_changed_spte_acc_track(old_spte, new_spte, level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
					      new_spte, level);
	return old_spte;
}

static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				     u64 new_spte, bool record_acc_track,
				     bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
					    iter->old_spte, new_spte,
					    iter->gfn, iter->level,
					    record_acc_track, record_dirty_log);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
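/*
 * Typical walk built from the macros above, sketched after the zap and
 * dirty-logging loops later in this file: visit every leaf SPTE in
 * [start, end) for a root, yielding when the lock is contended (the loop
 * body here is a placeholder):
 *
 *	rcu_read_lock();
 *	tdp_root_for_each_leaf_pte(iter, root, start, end) {
 *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
 *			continue;
 *		// inspect or update iter.old_spte ...
 *	}
 *	rcu_read_unlock();
 */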
/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		rcu_read_unlock();

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}
static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
	 * a gpa range that would exceed the max gfn, and KVM does not create
	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
	 * the slow emulation path every time.
	 */
	return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			       bool shared, int zap_level)
{
	struct tdp_iter iter;

	gfn_t end = tdp_mmu_max_gfn_exclusive();
	gfn_t start = 0;

	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_set_spte(kvm, &iter, 0);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
			goto retry;
	}
}
static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{
	/*
	 * The root must have an elevated refcount so that it's reachable via
	 * mmu_notifier callbacks, which allows this path to yield and drop
	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
	 * must drop all references to relevant pages prior to completing the
	 * callback.  Dropping mmu_lock with an unreachable root would result
	 * in zapping SPTEs after a relevant mmu_notifier callback completes
	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
	 * dirty accessed bits to the SPTE's associated struct page.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
	 * split the zap into two passes.  On the first pass, zap at the 1gb
	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
	 *
	 * Because zapping a SP recurses on its children, stepping down to
	 * PG_LEVEL_4K in the iterator itself is unnecessary.
	 */
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * This helper intentionally doesn't allow zapping a root shadow page,
	 * which doesn't have a parent page table and thus no associated entry.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
		return false;

	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
			   sp->gfn, sp->role.level + 1, true, true);

	return true;
}
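/*
 * Hedged sketch of a caller: kvm_tdp_mmu_zap_sp() is tailored to the NX huge
 * page recovery worker in mmu.c, which is expected to zap a specific shadow
 * page and accumulate the need for a TLB flush, roughly:
 *
 *	if (is_tdp_mmu_page(sp))
 *		flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
 *
 * The exact caller logic lives outside this file.
 */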
/*
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	end = min(end, tdp_mmu_max_gfn_exclusive());

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();

	/*
	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
	 */
	return flush;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
 * more SPTEs were zapped since the MMU lock was last acquired.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
			   bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);

	return flush;
}
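/*
 * Hedged sketch of the primary caller: the mmu_notifier unmap path is
 * expected to funnel into kvm_tdp_mmu_zap_leafs() with yielding allowed,
 * roughly:
 *
 *	bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm,
 *					 struct kvm_gfn_range *range, bool flush)
 *	{
 *		return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id,
 *					     range->start, range->end,
 *					     range->may_block, flush);
 *	}
 */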
100322b94c4bSPaolo Bonzini *
100477c8cd6bSSean Christopherson * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
100577c8cd6bSSean Christopherson * is being destroyed or the userspace VMM has exited. In both cases,
100677c8cd6bSSean Christopherson * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
100777c8cd6bSSean Christopherson */
1008e2b5b21dSSean Christopherson for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1009e2b5b21dSSean Christopherson for_each_tdp_mmu_root_yield_safe(kvm, root, i)
1010e2b5b21dSSean Christopherson tdp_mmu_zap_root(kvm, root, false);
1011e2b5b21dSSean Christopherson }
1012faaf05b0SBen Gardon }
1013bb18842eSBen Gardon 
10144c6654bdSBen Gardon /*
1015f28e9c7fSSean Christopherson * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
101622b94c4bSPaolo Bonzini * zap" completes.
10174c6654bdSBen Gardon */
10184c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
10194c6654bdSBen Gardon {
102022b94c4bSPaolo Bonzini flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
10214c6654bdSBen Gardon }
10224c6654bdSBen Gardon 
1023bb18842eSBen Gardon /*
1024f28e9c7fSSean Christopherson * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
102522b94c4bSPaolo Bonzini * is about to be zapped, e.g. in response to a memslots update. The actual
102622b94c4bSPaolo Bonzini * zapping is performed asynchronously, so a reference is taken on all roots.
102722b94c4bSPaolo Bonzini * Using a separate workqueue makes it easy to ensure that the destruction is
102822b94c4bSPaolo Bonzini * performed before the "fast zap" completes, without keeping a separate list
102922b94c4bSPaolo Bonzini * of invalidated roots; the list is effectively the list of work items in
103022b94c4bSPaolo Bonzini * the workqueue.
1031b7cccd39SBen Gardon *
103222b94c4bSPaolo Bonzini * Get a reference even if the root is already invalid; the asynchronous worker
103322b94c4bSPaolo Bonzini * assumes it was gifted a reference to the root it processes. Because mmu_lock
103422b94c4bSPaolo Bonzini * is held for write, it should be impossible to observe a root with zero refcount,
103522b94c4bSPaolo Bonzini * i.e. the list of roots cannot be stale.
10364c6654bdSBen Gardon *
1037b7cccd39SBen Gardon * This has essentially the same effect for the TDP MMU
1038b7cccd39SBen Gardon * as updating mmu_valid_gen does for the shadow MMU.
1039b7cccd39SBen Gardon */
1040b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
1041b7cccd39SBen Gardon {
1042b7cccd39SBen Gardon struct kvm_mmu_page *root;
1043b7cccd39SBen Gardon 
1044b7cccd39SBen Gardon lockdep_assert_held_write(&kvm->mmu_lock);
1045f28e9c7fSSean Christopherson list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1046efd995daSPaolo Bonzini if (!root->role.invalid &&
1047efd995daSPaolo Bonzini !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
1048b7cccd39SBen Gardon root->role.invalid = true;
104922b94c4bSPaolo Bonzini tdp_mmu_schedule_zap_root(kvm, root);
105022b94c4bSPaolo Bonzini }
1051b7cccd39SBen Gardon }
1052f28e9c7fSSean Christopherson }
1053b7cccd39SBen Gardon 
1054bb18842eSBen Gardon /*
1055bb18842eSBen Gardon * Installs a last-level SPTE to handle a TDP page fault.
1056bb18842eSBen Gardon * (NPT/EPT violation/misconfiguration) 1057bb18842eSBen Gardon */ 1058cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, 1059cdc47767SPaolo Bonzini struct kvm_page_fault *fault, 1060cdc47767SPaolo Bonzini struct tdp_iter *iter) 1061bb18842eSBen Gardon { 1062c435d4b7SSean Christopherson struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); 1063bb18842eSBen Gardon u64 new_spte; 106457a3e96dSKai Huang int ret = RET_PF_FIXED; 1065ad67e480SPaolo Bonzini bool wrprot = false; 1066bb18842eSBen Gardon 106750a9ac25SSean Christopherson if (WARN_ON_ONCE(sp->role.level != fault->goal_level)) 106850a9ac25SSean Christopherson return RET_PF_RETRY; 106950a9ac25SSean Christopherson 1070e710c5f6SDavid Matlack if (unlikely(!fault->slot)) 1071bb18842eSBen Gardon new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); 10729a77daacSBen Gardon else 107353597858SDavid Matlack wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, 10742839180cSPaolo Bonzini fault->pfn, iter->old_spte, fault->prefetch, true, 10757158bee4SPaolo Bonzini fault->map_writable, &new_spte); 1076bb18842eSBen Gardon 1077bb18842eSBen Gardon if (new_spte == iter->old_spte) 1078bb18842eSBen Gardon ret = RET_PF_SPURIOUS; 10793e72c791SDavid Matlack else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) 10809a77daacSBen Gardon return RET_PF_RETRY; 1081bb95dfb9SSean Christopherson else if (is_shadow_present_pte(iter->old_spte) && 1082bb95dfb9SSean Christopherson !is_last_spte(iter->old_spte, iter->level)) 1083*1e203847SHou Wenlong kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); 1084bb18842eSBen Gardon 1085bb18842eSBen Gardon /* 1086bb18842eSBen Gardon * If the page fault was caused by a write but the page is write 1087bb18842eSBen Gardon * protected, emulation is needed. If the emulation was skipped, 1088bb18842eSBen Gardon * the vCPU would have the same fault again. 1089bb18842eSBen Gardon */ 1090ad67e480SPaolo Bonzini if (wrprot) { 1091cdc47767SPaolo Bonzini if (fault->write) 1092bb18842eSBen Gardon ret = RET_PF_EMULATE; 1093bb18842eSBen Gardon } 1094bb18842eSBen Gardon 1095bb18842eSBen Gardon /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ 10969a77daacSBen Gardon if (unlikely(is_mmio_spte(new_spte))) { 10971075d41eSSean Christopherson vcpu->stat.pf_mmio_spte_created++; 10989a77daacSBen Gardon trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, 10999a77daacSBen Gardon new_spte); 1100bb18842eSBen Gardon ret = RET_PF_EMULATE; 11013849e092SSean Christopherson } else { 11029a77daacSBen Gardon trace_kvm_mmu_set_spte(iter->level, iter->gfn, 11039a77daacSBen Gardon rcu_dereference(iter->sptep)); 11043849e092SSean Christopherson } 1105bb18842eSBen Gardon 1106bb18842eSBen Gardon return ret; 1107bb18842eSBen Gardon } 1108bb18842eSBen Gardon 1109bb18842eSBen Gardon /* 1110cb00a70bSDavid Matlack * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the 1111cb00a70bSDavid Matlack * provided page table. 11127b7e1ab6SDavid Matlack * 11137b7e1ab6SDavid Matlack * @kvm: kvm instance 11147b7e1ab6SDavid Matlack * @iter: a tdp_iter instance currently on the SPTE that should be set 11157b7e1ab6SDavid Matlack * @sp: The new TDP page table to install. 1116cb00a70bSDavid Matlack * @shared: This operation is running under the MMU lock in read mode. 11177b7e1ab6SDavid Matlack * 11187b7e1ab6SDavid Matlack * Returns: 0 if the new page table was installed. 
Non-0 if the page table 11197b7e1ab6SDavid Matlack * could not be installed (e.g. the atomic compare-exchange failed). 11207b7e1ab6SDavid Matlack */ 1121cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, 112261f94478SSean Christopherson struct kvm_mmu_page *sp, bool shared) 11237b7e1ab6SDavid Matlack { 112454275f74SSean Christopherson u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled()); 1125cb00a70bSDavid Matlack int ret = 0; 11267b7e1ab6SDavid Matlack 1127cb00a70bSDavid Matlack if (shared) { 11287b7e1ab6SDavid Matlack ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); 11297b7e1ab6SDavid Matlack if (ret) 11307b7e1ab6SDavid Matlack return ret; 1131cb00a70bSDavid Matlack } else { 1132cb00a70bSDavid Matlack tdp_mmu_set_spte(kvm, iter, spte); 1133cb00a70bSDavid Matlack } 11347b7e1ab6SDavid Matlack 113543a063caSYosry Ahmed tdp_account_mmu_page(kvm, sp); 11367b7e1ab6SDavid Matlack 11377b7e1ab6SDavid Matlack return 0; 11387b7e1ab6SDavid Matlack } 11397b7e1ab6SDavid Matlack 1140c4b33d28SDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, 1141c4b33d28SDavid Matlack struct kvm_mmu_page *sp, bool shared); 1142c4b33d28SDavid Matlack 11437b7e1ab6SDavid Matlack /* 1144bb18842eSBen Gardon * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing 1145bb18842eSBen Gardon * page tables and SPTEs to translate the faulting guest physical address. 1146bb18842eSBen Gardon */ 11472f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) 1148bb18842eSBen Gardon { 1149bb18842eSBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu; 115061f94478SSean Christopherson struct kvm *kvm = vcpu->kvm; 1151bb18842eSBen Gardon struct tdp_iter iter; 115289c0fd49SBen Gardon struct kvm_mmu_page *sp; 115363d28a25SPaolo Bonzini int ret = RET_PF_RETRY; 1154bb18842eSBen Gardon 115573a3c659SPaolo Bonzini kvm_mmu_hugepage_adjust(vcpu, fault); 1156bb18842eSBen Gardon 1157f0066d94SPaolo Bonzini trace_kvm_mmu_spte_requested(fault); 11587cca2d0bSBen Gardon 11597cca2d0bSBen Gardon rcu_read_lock(); 11607cca2d0bSBen Gardon 11612f6305ddSPaolo Bonzini tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { 116263d28a25SPaolo Bonzini int r; 116363d28a25SPaolo Bonzini 116473a3c659SPaolo Bonzini if (fault->nx_huge_page_workaround_enabled) 1165536f0e6aSPaolo Bonzini disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); 1166bb18842eSBen Gardon 1167bb18842eSBen Gardon /* 1168c4b33d28SDavid Matlack * If SPTE has been frozen by another thread, just give up and 1169c4b33d28SDavid Matlack * retry, avoiding unnecessary page table allocation and free. 1170ff76d506SKai Huang */ 1171ff76d506SKai Huang if (is_removed_spte(iter.old_spte)) 117263d28a25SPaolo Bonzini goto retry; 117363d28a25SPaolo Bonzini 1174f5d16bb9SSean Christopherson if (iter.level == fault->goal_level) 117580a3e4aeSSean Christopherson goto map_target_level; 1176f5d16bb9SSean Christopherson 117763d28a25SPaolo Bonzini /* Step down into the lower level page table if it exists. */ 117863d28a25SPaolo Bonzini if (is_shadow_present_pte(iter.old_spte) && 117963d28a25SPaolo Bonzini !is_large_pte(iter.old_spte)) 118063d28a25SPaolo Bonzini continue; 1181ff76d506SKai Huang 1182c4b33d28SDavid Matlack /* 1183c4b33d28SDavid Matlack * The SPTE is either non-present or points to a huge page that 1184c4b33d28SDavid Matlack * needs to be split. 
1185c4b33d28SDavid Matlack */ 1186a82070b6SDavid Matlack sp = tdp_mmu_alloc_sp(vcpu); 1187a82070b6SDavid Matlack tdp_mmu_init_child_sp(sp, &iter); 1188a82070b6SDavid Matlack 118961f94478SSean Christopherson sp->nx_huge_page_disallowed = fault->huge_page_disallowed; 119061f94478SSean Christopherson 1191c4b33d28SDavid Matlack if (is_shadow_present_pte(iter.old_spte)) 119263d28a25SPaolo Bonzini r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); 1193c4b33d28SDavid Matlack else 119463d28a25SPaolo Bonzini r = tdp_mmu_link_sp(kvm, &iter, sp, true); 1195c4b33d28SDavid Matlack 119663d28a25SPaolo Bonzini /* 119780a3e4aeSSean Christopherson * Force the guest to retry if installing an upper level SPTE 119880a3e4aeSSean Christopherson * failed, e.g. because a different task modified the SPTE. 119963d28a25SPaolo Bonzini */ 120063d28a25SPaolo Bonzini if (r) { 12019a77daacSBen Gardon tdp_mmu_free_sp(sp); 120263d28a25SPaolo Bonzini goto retry; 12039a77daacSBen Gardon } 120461f94478SSean Christopherson 120561f94478SSean Christopherson if (fault->huge_page_disallowed && 120661f94478SSean Christopherson fault->req_level >= iter.level) { 120761f94478SSean Christopherson spin_lock(&kvm->arch.tdp_mmu_pages_lock); 120821a36ac6SSean Christopherson if (sp->nx_huge_page_disallowed) 120961f94478SSean Christopherson track_possible_nx_huge_page(kvm, sp); 121061f94478SSean Christopherson spin_unlock(&kvm->arch.tdp_mmu_pages_lock); 121161f94478SSean Christopherson } 1212bb18842eSBen Gardon } 1213bb18842eSBen Gardon 121480a3e4aeSSean Christopherson /* 121580a3e4aeSSean Christopherson * The walk aborted before reaching the target level, e.g. because the 121680a3e4aeSSean Christopherson * iterator detected an upper level SPTE was frozen during traversal. 121780a3e4aeSSean Christopherson */ 121880a3e4aeSSean Christopherson WARN_ON_ONCE(iter.level == fault->goal_level); 121980a3e4aeSSean Christopherson goto retry; 122080a3e4aeSSean Christopherson 122180a3e4aeSSean Christopherson map_target_level: 1222cdc47767SPaolo Bonzini ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); 1223bb18842eSBen Gardon 122463d28a25SPaolo Bonzini retry: 122563d28a25SPaolo Bonzini rcu_read_unlock(); 1226bb18842eSBen Gardon return ret; 1227bb18842eSBen Gardon } 1228063afacdSBen Gardon 12293039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, 12303039bcc7SSean Christopherson bool flush) 1231063afacdSBen Gardon { 1232f47e5bbbSSean Christopherson return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start, 123383b83a02SSean Christopherson range->end, range->may_block, flush); 12343039bcc7SSean Christopherson } 12353039bcc7SSean Christopherson 12363039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, 12373039bcc7SSean Christopherson struct kvm_gfn_range *range); 12383039bcc7SSean Christopherson 12393039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, 12403039bcc7SSean Christopherson struct kvm_gfn_range *range, 1241c1b91493SSean Christopherson tdp_handler_t handler) 1242063afacdSBen Gardon { 1243063afacdSBen Gardon struct kvm_mmu_page *root; 12443039bcc7SSean Christopherson struct tdp_iter iter; 12453039bcc7SSean Christopherson bool ret = false; 1246063afacdSBen Gardon 1247063afacdSBen Gardon /* 1248e1eed584SSean Christopherson * Don't support rescheduling, none of the MMU notifiers that funnel 1249e1eed584SSean Christopherson * into this helper allow blocking; it'd be dead, wasteful code. 
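 *
 * A handler sketch (illustrative; the name my_handler is made up, but
 * test_age_gfn() below is a real in-tree instance of tdp_handler_t).
 * Handlers see one leaf SPTE at a time via @iter and return whether
 * anything matched or changed:
 *
 *	static bool my_handler(struct kvm *kvm, struct tdp_iter *iter,
 *			       struct kvm_gfn_range *range)
 *	{
 *		return is_accessed_spte(iter->old_spte);
 *	}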
1250063afacdSBen Gardon */
12513039bcc7SSean Christopherson for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1252a151acecSSean Christopherson rcu_read_lock();
1253a151acecSSean Christopherson 
12543039bcc7SSean Christopherson tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
12553039bcc7SSean Christopherson ret |= handler(kvm, &iter, range);
1256063afacdSBen Gardon 
12573039bcc7SSean Christopherson rcu_read_unlock();
1258a151acecSSean Christopherson }
1259063afacdSBen Gardon 
1260063afacdSBen Gardon return ret;
1261063afacdSBen Gardon }
1262063afacdSBen Gardon 
1263f8e14497SBen Gardon /*
1264f8e14497SBen Gardon * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1265f8e14497SBen Gardon * non-zero if any of the GFNs in the range have been accessed.
1266f8e14497SBen Gardon */
12673039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
12683039bcc7SSean Christopherson struct kvm_gfn_range *range)
1269f8e14497SBen Gardon {
1270f8e14497SBen Gardon u64 new_spte = 0;
1271f8e14497SBen Gardon 
12723039bcc7SSean Christopherson /* If we have a non-accessed entry we don't need to change the pte. */
12733039bcc7SSean Christopherson if (!is_accessed_spte(iter->old_spte))
12743039bcc7SSean Christopherson return false;
12757cca2d0bSBen Gardon 
12763039bcc7SSean Christopherson new_spte = iter->old_spte;
1277f8e14497SBen Gardon 
1278f8e14497SBen Gardon if (spte_ad_enabled(new_spte)) {
12798f8f52a4SSean Christopherson new_spte &= ~shadow_accessed_mask;
1280f8e14497SBen Gardon } else {
1281f8e14497SBen Gardon /*
1282f8e14497SBen Gardon * Capture the dirty status of the page, so that it doesn't get
1283f8e14497SBen Gardon * lost when the SPTE is marked for access tracking.
1284f8e14497SBen Gardon */
1285f8e14497SBen Gardon if (is_writable_pte(new_spte))
1286f8e14497SBen Gardon kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1287f8e14497SBen Gardon 
1288f8e14497SBen Gardon new_spte = mark_spte_for_access_track(new_spte);
1289f8e14497SBen Gardon }
1290f8e14497SBen Gardon 
12913039bcc7SSean Christopherson tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
129233dd3574SBen Gardon 
12933039bcc7SSean Christopherson return true;
1294f8e14497SBen Gardon }
1295f8e14497SBen Gardon 
12963039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1297f8e14497SBen Gardon {
12983039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1299f8e14497SBen Gardon }
1300f8e14497SBen Gardon 
13013039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
13023039bcc7SSean Christopherson struct kvm_gfn_range *range)
1303f8e14497SBen Gardon {
13043039bcc7SSean Christopherson return is_accessed_spte(iter->old_spte);
1305f8e14497SBen Gardon }
1306f8e14497SBen Gardon 
13073039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1308f8e14497SBen Gardon {
13093039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
13103039bcc7SSean Christopherson }
13113039bcc7SSean Christopherson 
13123039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
13133039bcc7SSean Christopherson struct kvm_gfn_range *range)
13143039bcc7SSean Christopherson {
13153039bcc7SSean Christopherson u64 new_spte;
13163039bcc7SSean Christopherson 
13173039bcc7SSean Christopherson /* Huge pages aren't expected to be modified without first being zapped. */
13183039bcc7SSean Christopherson WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
13193039bcc7SSean Christopherson 
13203039bcc7SSean Christopherson if (iter->level != PG_LEVEL_4K ||
13213039bcc7SSean Christopherson !is_shadow_present_pte(iter->old_spte))
13223039bcc7SSean Christopherson return false;
13233039bcc7SSean Christopherson 
13243039bcc7SSean Christopherson /*
13253039bcc7SSean Christopherson * Note, when changing a read-only SPTE, it's not strictly necessary to
13263039bcc7SSean Christopherson * zero the SPTE before setting the new PFN, but doing so preserves the
13273039bcc7SSean Christopherson * invariant that the PFN of a present leaf SPTE can never change.
13283039bcc7SSean Christopherson * See __handle_changed_spte().
13293039bcc7SSean Christopherson */
13303039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, 0);
13313039bcc7SSean Christopherson 
13323039bcc7SSean Christopherson if (!pte_write(range->pte)) {
13333039bcc7SSean Christopherson new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
13343039bcc7SSean Christopherson pte_pfn(range->pte));
13353039bcc7SSean Christopherson 
13363039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, new_spte);
13373039bcc7SSean Christopherson }
13383039bcc7SSean Christopherson 
13393039bcc7SSean Christopherson return true;
1340f8e14497SBen Gardon }
13411d8dd6b3SBen Gardon 
13421d8dd6b3SBen Gardon /*
13431d8dd6b3SBen Gardon * Handle the changed_pte MMU notifier for the TDP MMU.
13441d8dd6b3SBen Gardon * data is a pointer to the new pte_t mapping the HVA specified by the MMU
13451d8dd6b3SBen Gardon * notifier.
13461d8dd6b3SBen Gardon * Returns non-zero if a flush is needed before releasing the MMU lock.
13471d8dd6b3SBen Gardon */
13483039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
13491d8dd6b3SBen Gardon {
135093fa50f6SSean Christopherson /*
135193fa50f6SSean Christopherson * No need to handle the remote TLB flush under RCU protection, the
135293fa50f6SSean Christopherson * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
135393fa50f6SSean Christopherson * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
135493fa50f6SSean Christopherson */
135593fa50f6SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
13561d8dd6b3SBen Gardon }
13571d8dd6b3SBen Gardon 
1358a6a0b05dSBen Gardon /*
1359bedd9195SDavid Matlack * Remove write access from all SPTEs at or above min_level that map GFNs
1360bedd9195SDavid Matlack * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1361bedd9195SDavid Matlack * be flushed.
1362a6a0b05dSBen Gardon */ 1363a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1364a6a0b05dSBen Gardon gfn_t start, gfn_t end, int min_level) 1365a6a0b05dSBen Gardon { 1366a6a0b05dSBen Gardon struct tdp_iter iter; 1367a6a0b05dSBen Gardon u64 new_spte; 1368a6a0b05dSBen Gardon bool spte_set = false; 1369a6a0b05dSBen Gardon 13707cca2d0bSBen Gardon rcu_read_lock(); 13717cca2d0bSBen Gardon 1372a6a0b05dSBen Gardon BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 1373a6a0b05dSBen Gardon 137477aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, start, end) { 137524ae4cfaSBen Gardon retry: 137624ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 13771af4a960SBen Gardon continue; 13781af4a960SBen Gardon 1379a6a0b05dSBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 13800f99ee2cSBen Gardon !is_last_spte(iter.old_spte, iter.level) || 13810f99ee2cSBen Gardon !(iter.old_spte & PT_WRITABLE_MASK)) 1382a6a0b05dSBen Gardon continue; 1383a6a0b05dSBen Gardon 1384a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1385a6a0b05dSBen Gardon 13863e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 138724ae4cfaSBen Gardon goto retry; 13883255530aSDavid Matlack 1389a6a0b05dSBen Gardon spte_set = true; 1390a6a0b05dSBen Gardon } 13917cca2d0bSBen Gardon 13927cca2d0bSBen Gardon rcu_read_unlock(); 1393a6a0b05dSBen Gardon return spte_set; 1394a6a0b05dSBen Gardon } 1395a6a0b05dSBen Gardon 1396a6a0b05dSBen Gardon /* 1397a6a0b05dSBen Gardon * Remove write access from all the SPTEs mapping GFNs in the memslot. Will 1398a6a0b05dSBen Gardon * only affect leaf SPTEs down to min_level. 1399a6a0b05dSBen Gardon * Returns true if an SPTE has been changed and the TLBs need to be flushed. 
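 *
 * Expected usage, in sketch form (illustrative; the real caller lives in
 * mmu.c and runs under mmu_lock held for read, flushing afterwards):
 *
 *	bool flush;
 *
 *	read_lock(&kvm->mmu_lock);
 *	flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
 *	read_unlock(&kvm->mmu_lock);
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);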
1400a6a0b05dSBen Gardon */ 1401269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, 1402269e9552SHamza Mahfooz const struct kvm_memory_slot *slot, int min_level) 1403a6a0b05dSBen Gardon { 1404a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1405a6a0b05dSBen Gardon bool spte_set = false; 1406a6a0b05dSBen Gardon 140724ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1408a6a0b05dSBen Gardon 1409d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1410a6a0b05dSBen Gardon spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, 1411a6a0b05dSBen Gardon slot->base_gfn + slot->npages, min_level); 1412a6a0b05dSBen Gardon 1413a6a0b05dSBen Gardon return spte_set; 1414a6a0b05dSBen Gardon } 1415a6a0b05dSBen Gardon 1416a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) 1417a3fe5dbdSDavid Matlack { 1418a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1419a3fe5dbdSDavid Matlack 1420a3fe5dbdSDavid Matlack gfp |= __GFP_ZERO; 1421a3fe5dbdSDavid Matlack 1422a3fe5dbdSDavid Matlack sp = kmem_cache_alloc(mmu_page_header_cache, gfp); 1423a3fe5dbdSDavid Matlack if (!sp) 1424a3fe5dbdSDavid Matlack return NULL; 1425a3fe5dbdSDavid Matlack 1426a3fe5dbdSDavid Matlack sp->spt = (void *)__get_free_page(gfp); 1427a3fe5dbdSDavid Matlack if (!sp->spt) { 1428a3fe5dbdSDavid Matlack kmem_cache_free(mmu_page_header_cache, sp); 1429a3fe5dbdSDavid Matlack return NULL; 1430a3fe5dbdSDavid Matlack } 1431a3fe5dbdSDavid Matlack 1432a3fe5dbdSDavid Matlack return sp; 1433a3fe5dbdSDavid Matlack } 1434a3fe5dbdSDavid Matlack 1435a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, 1436cb00a70bSDavid Matlack struct tdp_iter *iter, 1437cb00a70bSDavid Matlack bool shared) 1438a3fe5dbdSDavid Matlack { 1439a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1440a3fe5dbdSDavid Matlack 1441a3fe5dbdSDavid Matlack /* 1442a3fe5dbdSDavid Matlack * Since we are allocating while under the MMU lock we have to be 1443a3fe5dbdSDavid Matlack * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct 1444a3fe5dbdSDavid Matlack * reclaim and to avoid making any filesystem callbacks (which can end 1445a3fe5dbdSDavid Matlack * up invoking KVM MMU notifiers, resulting in a deadlock). 1446a3fe5dbdSDavid Matlack * 1447a3fe5dbdSDavid Matlack * If this allocation fails we drop the lock and retry with reclaim 1448a3fe5dbdSDavid Matlack * allowed. 
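 *
 * In other words, the two attempts below differ only in their GFP masks
 * (a summary of the code that follows, not new behavior):
 *
 *	GFP_NOWAIT | __GFP_ACCOUNT - no direct reclaim, no filesystem
 *		callbacks, charged to the memcg; may fail.
 *	GFP_KERNEL_ACCOUNT - blocking allocation with reclaim allowed,
 *		attempted only after every lock has been dropped.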
1449a3fe5dbdSDavid Matlack */
1450a3fe5dbdSDavid Matlack sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1451a3fe5dbdSDavid Matlack if (sp)
1452a3fe5dbdSDavid Matlack return sp;
1453a3fe5dbdSDavid Matlack 
1454a3fe5dbdSDavid Matlack rcu_read_unlock();
1455cb00a70bSDavid Matlack 
1456cb00a70bSDavid Matlack if (shared)
1457a3fe5dbdSDavid Matlack read_unlock(&kvm->mmu_lock);
1458cb00a70bSDavid Matlack else
1459cb00a70bSDavid Matlack write_unlock(&kvm->mmu_lock);
1460a3fe5dbdSDavid Matlack 
1461a3fe5dbdSDavid Matlack iter->yielded = true;
1462a3fe5dbdSDavid Matlack sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1463a3fe5dbdSDavid Matlack 
1464cb00a70bSDavid Matlack if (shared)
1465a3fe5dbdSDavid Matlack read_lock(&kvm->mmu_lock);
1466cb00a70bSDavid Matlack else
1467cb00a70bSDavid Matlack write_lock(&kvm->mmu_lock);
1468cb00a70bSDavid Matlack 
1469a3fe5dbdSDavid Matlack rcu_read_lock();
1470a3fe5dbdSDavid Matlack 
1471a3fe5dbdSDavid Matlack return sp;
1472a3fe5dbdSDavid Matlack }
1473a3fe5dbdSDavid Matlack 
1474c4b33d28SDavid Matlack /* Note, the caller is responsible for initializing @sp. */
1475cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1476cb00a70bSDavid Matlack struct kvm_mmu_page *sp, bool shared)
1477a3fe5dbdSDavid Matlack {
1478a3fe5dbdSDavid Matlack const u64 huge_spte = iter->old_spte;
1479a3fe5dbdSDavid Matlack const int level = iter->level;
1480a3fe5dbdSDavid Matlack int ret, i;
1481a3fe5dbdSDavid Matlack 
1482a3fe5dbdSDavid Matlack /*
1483a3fe5dbdSDavid Matlack * No need for atomics when writing to sp->spt since the page table has
1484a3fe5dbdSDavid Matlack * not been linked in yet and thus is not reachable from any other CPU.
1485a3fe5dbdSDavid Matlack */
14862ca3129eSSean Christopherson for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
148747855da0SDavid Matlack sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1488a3fe5dbdSDavid Matlack 
1489a3fe5dbdSDavid Matlack /*
1490a3fe5dbdSDavid Matlack * Replace the huge spte with a pointer to the populated lower level
1491a3fe5dbdSDavid Matlack * page table. Since we are making this change without a TLB flush, vCPUs
1492a3fe5dbdSDavid Matlack * will see a mix of the split mappings and the original huge mapping,
1493a3fe5dbdSDavid Matlack * depending on what's currently in their TLB. This is fine from a
1494a3fe5dbdSDavid Matlack * correctness standpoint since the translation will be the same either
1495a3fe5dbdSDavid Matlack * way.
1496a3fe5dbdSDavid Matlack */
149761f94478SSean Christopherson ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1498a3fe5dbdSDavid Matlack if (ret)
1499e0b728b1SDavid Matlack goto out;
1500a3fe5dbdSDavid Matlack 
1501a3fe5dbdSDavid Matlack /*
1502a3fe5dbdSDavid Matlack * tdp_mmu_link_sp() will handle subtracting the huge page we
1503a3fe5dbdSDavid Matlack * are overwriting from the page stats. But we have to manually update
1504a3fe5dbdSDavid Matlack * the page stats with the new present child pages.
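 *
 * A worked example (assuming 4KiB base pages, so SPTE_ENT_PER_PAGE is
 * 512): splitting one 2MB SPTE installs 512 4KB child SPTEs, so the
 * 4KB-page stat below grows by 512, while the link step above already
 * dropped the single 2MB page from the 2MB-page stat.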
1505a3fe5dbdSDavid Matlack */ 15062ca3129eSSean Christopherson kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); 1507a3fe5dbdSDavid Matlack 1508e0b728b1SDavid Matlack out: 1509e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); 1510e0b728b1SDavid Matlack return ret; 1511a3fe5dbdSDavid Matlack } 1512a3fe5dbdSDavid Matlack 1513a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, 1514a3fe5dbdSDavid Matlack struct kvm_mmu_page *root, 1515a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1516cb00a70bSDavid Matlack int target_level, bool shared) 1517a3fe5dbdSDavid Matlack { 1518a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp = NULL; 1519a3fe5dbdSDavid Matlack struct tdp_iter iter; 1520a3fe5dbdSDavid Matlack int ret = 0; 1521a3fe5dbdSDavid Matlack 1522a3fe5dbdSDavid Matlack rcu_read_lock(); 1523a3fe5dbdSDavid Matlack 1524a3fe5dbdSDavid Matlack /* 1525a3fe5dbdSDavid Matlack * Traverse the page table splitting all huge pages above the target 1526a3fe5dbdSDavid Matlack * level into one lower level. For example, if we encounter a 1GB page 1527a3fe5dbdSDavid Matlack * we split it into 512 2MB pages. 1528a3fe5dbdSDavid Matlack * 1529a3fe5dbdSDavid Matlack * Since the TDP iterator uses a pre-order traversal, we are guaranteed 1530a3fe5dbdSDavid Matlack * to visit an SPTE before ever visiting its children, which means we 1531a3fe5dbdSDavid Matlack * will correctly recursively split huge pages that are more than one 1532a3fe5dbdSDavid Matlack * level above the target level (e.g. splitting a 1GB to 512 2MB pages, 1533a3fe5dbdSDavid Matlack * and then splitting each of those to 512 4KB pages). 1534a3fe5dbdSDavid Matlack */ 1535a3fe5dbdSDavid Matlack for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { 1536a3fe5dbdSDavid Matlack retry: 1537cb00a70bSDavid Matlack if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) 1538a3fe5dbdSDavid Matlack continue; 1539a3fe5dbdSDavid Matlack 1540a3fe5dbdSDavid Matlack if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) 1541a3fe5dbdSDavid Matlack continue; 1542a3fe5dbdSDavid Matlack 1543a3fe5dbdSDavid Matlack if (!sp) { 1544cb00a70bSDavid Matlack sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); 1545a3fe5dbdSDavid Matlack if (!sp) { 1546a3fe5dbdSDavid Matlack ret = -ENOMEM; 1547e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter.gfn, 1548e0b728b1SDavid Matlack iter.old_spte, 1549e0b728b1SDavid Matlack iter.level, ret); 1550a3fe5dbdSDavid Matlack break; 1551a3fe5dbdSDavid Matlack } 1552a3fe5dbdSDavid Matlack 1553a3fe5dbdSDavid Matlack if (iter.yielded) 1554a3fe5dbdSDavid Matlack continue; 1555a3fe5dbdSDavid Matlack } 1556a3fe5dbdSDavid Matlack 1557c4b33d28SDavid Matlack tdp_mmu_init_child_sp(sp, &iter); 1558c4b33d28SDavid Matlack 1559cb00a70bSDavid Matlack if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) 1560a3fe5dbdSDavid Matlack goto retry; 1561a3fe5dbdSDavid Matlack 1562a3fe5dbdSDavid Matlack sp = NULL; 1563a3fe5dbdSDavid Matlack } 1564a3fe5dbdSDavid Matlack 1565a3fe5dbdSDavid Matlack rcu_read_unlock(); 1566a3fe5dbdSDavid Matlack 1567a3fe5dbdSDavid Matlack /* 1568a3fe5dbdSDavid Matlack * It's possible to exit the loop having never used the last sp if, for 1569a3fe5dbdSDavid Matlack * example, a vCPU doing HugePage NX splitting wins the race and 1570a3fe5dbdSDavid Matlack * installs its own sp in place of the last sp we tried to split. 
1571a3fe5dbdSDavid Matlack */ 1572a3fe5dbdSDavid Matlack if (sp) 1573a3fe5dbdSDavid Matlack tdp_mmu_free_sp(sp); 1574a3fe5dbdSDavid Matlack 1575a3fe5dbdSDavid Matlack return ret; 1576a3fe5dbdSDavid Matlack } 1577a3fe5dbdSDavid Matlack 1578cb00a70bSDavid Matlack 1579a3fe5dbdSDavid Matlack /* 1580a3fe5dbdSDavid Matlack * Try to split all huge pages mapped by the TDP MMU down to the target level. 1581a3fe5dbdSDavid Matlack */ 1582a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, 1583a3fe5dbdSDavid Matlack const struct kvm_memory_slot *slot, 1584a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1585cb00a70bSDavid Matlack int target_level, bool shared) 1586a3fe5dbdSDavid Matlack { 1587a3fe5dbdSDavid Matlack struct kvm_mmu_page *root; 1588a3fe5dbdSDavid Matlack int r = 0; 1589a3fe5dbdSDavid Matlack 1590cb00a70bSDavid Matlack kvm_lockdep_assert_mmu_lock_held(kvm, shared); 1591a3fe5dbdSDavid Matlack 15927c554d8eSPaolo Bonzini for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { 1593cb00a70bSDavid Matlack r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); 1594a3fe5dbdSDavid Matlack if (r) { 1595cb00a70bSDavid Matlack kvm_tdp_mmu_put_root(kvm, root, shared); 1596a3fe5dbdSDavid Matlack break; 1597a3fe5dbdSDavid Matlack } 1598a3fe5dbdSDavid Matlack } 1599a3fe5dbdSDavid Matlack } 1600a3fe5dbdSDavid Matlack 1601a6a0b05dSBen Gardon /* 1602a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1603a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1604a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1605a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1606a6a0b05dSBen Gardon * be flushed. 
1607a6a0b05dSBen Gardon */ 1608a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1609a6a0b05dSBen Gardon gfn_t start, gfn_t end) 1610a6a0b05dSBen Gardon { 1611a6a0b05dSBen Gardon struct tdp_iter iter; 1612a6a0b05dSBen Gardon u64 new_spte; 1613a6a0b05dSBen Gardon bool spte_set = false; 1614a6a0b05dSBen Gardon 16157cca2d0bSBen Gardon rcu_read_lock(); 16167cca2d0bSBen Gardon 1617a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, start, end) { 161824ae4cfaSBen Gardon retry: 161924ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 16201af4a960SBen Gardon continue; 16211af4a960SBen Gardon 16223354ef5aSSean Christopherson if (!is_shadow_present_pte(iter.old_spte)) 16233354ef5aSSean Christopherson continue; 16243354ef5aSSean Christopherson 1625a6a0b05dSBen Gardon if (spte_ad_need_write_protect(iter.old_spte)) { 1626a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1627a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1628a6a0b05dSBen Gardon else 1629a6a0b05dSBen Gardon continue; 1630a6a0b05dSBen Gardon } else { 1631a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1632a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1633a6a0b05dSBen Gardon else 1634a6a0b05dSBen Gardon continue; 1635a6a0b05dSBen Gardon } 1636a6a0b05dSBen Gardon 16373e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 163824ae4cfaSBen Gardon goto retry; 16393255530aSDavid Matlack 1640a6a0b05dSBen Gardon spte_set = true; 1641a6a0b05dSBen Gardon } 16427cca2d0bSBen Gardon 16437cca2d0bSBen Gardon rcu_read_unlock(); 1644a6a0b05dSBen Gardon return spte_set; 1645a6a0b05dSBen Gardon } 1646a6a0b05dSBen Gardon 1647a6a0b05dSBen Gardon /* 1648a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1649a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1650a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1651a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1652a6a0b05dSBen Gardon * be flushed. 1653a6a0b05dSBen Gardon */ 1654269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, 1655269e9552SHamza Mahfooz const struct kvm_memory_slot *slot) 1656a6a0b05dSBen Gardon { 1657a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1658a6a0b05dSBen Gardon bool spte_set = false; 1659a6a0b05dSBen Gardon 166024ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1661a6a0b05dSBen Gardon 1662d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1663a6a0b05dSBen Gardon spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, 1664a6a0b05dSBen Gardon slot->base_gfn + slot->npages); 1665a6a0b05dSBen Gardon 1666a6a0b05dSBen Gardon return spte_set; 1667a6a0b05dSBen Gardon } 1668a6a0b05dSBen Gardon 1669a6a0b05dSBen Gardon /* 1670a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1671a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1672a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1673a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1674a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
1675a6a0b05dSBen Gardon */ 1676a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, 1677a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, bool wrprot) 1678a6a0b05dSBen Gardon { 1679a6a0b05dSBen Gardon struct tdp_iter iter; 1680a6a0b05dSBen Gardon u64 new_spte; 1681a6a0b05dSBen Gardon 16827cca2d0bSBen Gardon rcu_read_lock(); 16837cca2d0bSBen Gardon 1684a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), 1685a6a0b05dSBen Gardon gfn + BITS_PER_LONG) { 1686a6a0b05dSBen Gardon if (!mask) 1687a6a0b05dSBen Gardon break; 1688a6a0b05dSBen Gardon 1689a6a0b05dSBen Gardon if (iter.level > PG_LEVEL_4K || 1690a6a0b05dSBen Gardon !(mask & (1UL << (iter.gfn - gfn)))) 1691a6a0b05dSBen Gardon continue; 1692a6a0b05dSBen Gardon 1693f1b3b06aSBen Gardon mask &= ~(1UL << (iter.gfn - gfn)); 1694f1b3b06aSBen Gardon 1695a6a0b05dSBen Gardon if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { 1696a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1697a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1698a6a0b05dSBen Gardon else 1699a6a0b05dSBen Gardon continue; 1700a6a0b05dSBen Gardon } else { 1701a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1702a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1703a6a0b05dSBen Gardon else 1704a6a0b05dSBen Gardon continue; 1705a6a0b05dSBen Gardon } 1706a6a0b05dSBen Gardon 1707a6a0b05dSBen Gardon tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); 1708a6a0b05dSBen Gardon } 17097cca2d0bSBen Gardon 17107cca2d0bSBen Gardon rcu_read_unlock(); 1711a6a0b05dSBen Gardon } 1712a6a0b05dSBen Gardon 1713a6a0b05dSBen Gardon /* 1714a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1715a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1716a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1717a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1718a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
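 *
 * A worked example of the mask semantics: bit i of @mask corresponds to
 * gfn + i, so with gfn == 0x1000 and mask == 0x5 (0b101), the SPTEs
 * mapping gfns 0x1000 and 0x1002 are handled and the rest of the 64-gfn
 * window is left untouched.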
1719a6a0b05dSBen Gardon */
1720a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1721a6a0b05dSBen Gardon struct kvm_memory_slot *slot,
1722a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask,
1723a6a0b05dSBen Gardon bool wrprot)
1724a6a0b05dSBen Gardon {
1725a6a0b05dSBen Gardon struct kvm_mmu_page *root;
1726a6a0b05dSBen Gardon 
1727531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock);
1728a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id)
1729a6a0b05dSBen Gardon clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1730a6a0b05dSBen Gardon }
1731a6a0b05dSBen Gardon 
17324b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
173314881998SBen Gardon struct kvm_mmu_page *root,
17344b85c921SSean Christopherson const struct kvm_memory_slot *slot)
173514881998SBen Gardon {
17369eba50f8SSean Christopherson gfn_t start = slot->base_gfn;
17379eba50f8SSean Christopherson gfn_t end = start + slot->npages;
173814881998SBen Gardon struct tdp_iter iter;
17395ba7c4c6SBen Gardon int max_mapping_level;
174014881998SBen Gardon 
17417cca2d0bSBen Gardon rcu_read_lock();
17427cca2d0bSBen Gardon 
174385f44f8cSSean Christopherson for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
174485f44f8cSSean Christopherson retry:
17454b85c921SSean Christopherson if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
17461af4a960SBen Gardon continue;
17471af4a960SBen Gardon 
174885f44f8cSSean Christopherson if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
174985f44f8cSSean Christopherson !is_shadow_present_pte(iter.old_spte))
175085f44f8cSSean Christopherson continue;
175185f44f8cSSean Christopherson 
175285f44f8cSSean Christopherson /*
175385f44f8cSSean Christopherson * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
175485f44f8cSSean Christopherson * a large page size, then its parent would have been zapped
175585f44f8cSSean Christopherson * instead of stepping down.
175685f44f8cSSean Christopherson */
175785f44f8cSSean Christopherson if (is_last_spte(iter.old_spte, iter.level))
175885f44f8cSSean Christopherson continue;
175985f44f8cSSean Christopherson 
176085f44f8cSSean Christopherson /*
176185f44f8cSSean Christopherson * If iter.gfn resides outside of the slot, i.e. the page for
176285f44f8cSSean Christopherson * the current level overlaps but is not contained by the slot,
176385f44f8cSSean Christopherson * then the SPTE can't be made huge. More importantly, trying
176485f44f8cSSean Christopherson * to query that info from slot->arch.lpage_info will cause an
176585f44f8cSSean Christopherson * out-of-bounds access.
176685f44f8cSSean Christopherson */
176785f44f8cSSean Christopherson if (iter.gfn < start || iter.gfn >= end)
176814881998SBen Gardon continue;
176914881998SBen Gardon 
17705ba7c4c6SBen Gardon max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1771a8ac499bSSean Christopherson iter.gfn, PG_LEVEL_NUM);
177285f44f8cSSean Christopherson if (max_mapping_level < iter.level)
17735ba7c4c6SBen Gardon continue;
17745ba7c4c6SBen Gardon 
17754b85c921SSean Christopherson /* Note, a successful atomic zap also does a remote TLB flush.
*/ 177685f44f8cSSean Christopherson if (tdp_mmu_zap_spte_atomic(kvm, &iter)) 177785f44f8cSSean Christopherson goto retry; 17782db6f772SBen Gardon } 177914881998SBen Gardon 17807cca2d0bSBen Gardon rcu_read_unlock(); 178114881998SBen Gardon } 178214881998SBen Gardon 178314881998SBen Gardon /* 178485f44f8cSSean Christopherson * Zap non-leaf SPTEs (and free their associated page tables) which could 178585f44f8cSSean Christopherson * be replaced by huge pages, for GFNs within the slot. 178614881998SBen Gardon */ 17874b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, 17884b85c921SSean Christopherson const struct kvm_memory_slot *slot) 178914881998SBen Gardon { 179014881998SBen Gardon struct kvm_mmu_page *root; 179114881998SBen Gardon 17922db6f772SBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 179314881998SBen Gardon 1794d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 17954b85c921SSean Christopherson zap_collapsible_spte_range(kvm, root, slot); 179614881998SBen Gardon } 179746044f72SBen Gardon 179846044f72SBen Gardon /* 179946044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 18005fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 180146044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 180246044f72SBen Gardon */ 180346044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, 18043ad93562SKeqian Zhu gfn_t gfn, int min_level) 180546044f72SBen Gardon { 180646044f72SBen Gardon struct tdp_iter iter; 180746044f72SBen Gardon u64 new_spte; 180846044f72SBen Gardon bool spte_set = false; 180946044f72SBen Gardon 18103ad93562SKeqian Zhu BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 18113ad93562SKeqian Zhu 18127cca2d0bSBen Gardon rcu_read_lock(); 18137cca2d0bSBen Gardon 181477aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { 18153ad93562SKeqian Zhu if (!is_shadow_present_pte(iter.old_spte) || 18163ad93562SKeqian Zhu !is_last_spte(iter.old_spte, iter.level)) 18173ad93562SKeqian Zhu continue; 18183ad93562SKeqian Zhu 181946044f72SBen Gardon new_spte = iter.old_spte & 18205fc3424fSSean Christopherson ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); 182146044f72SBen Gardon 18227c8a4742SDavid Matlack if (new_spte == iter.old_spte) 18237c8a4742SDavid Matlack break; 18247c8a4742SDavid Matlack 182546044f72SBen Gardon tdp_mmu_set_spte(kvm, &iter, new_spte); 182646044f72SBen Gardon spte_set = true; 182746044f72SBen Gardon } 182846044f72SBen Gardon 18297cca2d0bSBen Gardon rcu_read_unlock(); 18307cca2d0bSBen Gardon 183146044f72SBen Gardon return spte_set; 183246044f72SBen Gardon } 183346044f72SBen Gardon 183446044f72SBen Gardon /* 183546044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 18365fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 183746044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 
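 *
 * Caller sketch (illustrative; the real users are the page-track and
 * dirty-logging paths in mmu.c, which hold mmu_lock for write):
 *
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);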
183846044f72SBen Gardon */
183946044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
18403ad93562SKeqian Zhu struct kvm_memory_slot *slot, gfn_t gfn,
18413ad93562SKeqian Zhu int min_level)
184246044f72SBen Gardon {
184346044f72SBen Gardon struct kvm_mmu_page *root;
184446044f72SBen Gardon bool spte_set = false;
184546044f72SBen Gardon 
1846531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock);
1847a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id)
18483ad93562SKeqian Zhu spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1849a3f15bdaSSean Christopherson 
185046044f72SBen Gardon return spte_set;
185146044f72SBen Gardon }
185246044f72SBen Gardon 
185395fb5b02SBen Gardon /*
185495fb5b02SBen Gardon * Return the level of the lowest level SPTE added to sptes.
185595fb5b02SBen Gardon * That SPTE may be non-present.
1856c5c8c7c5SDavid Matlack *
1857c5c8c7c5SDavid Matlack * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
185895fb5b02SBen Gardon */
185939b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
186039b4d43eSSean Christopherson int *root_level)
186195fb5b02SBen Gardon {
186295fb5b02SBen Gardon struct tdp_iter iter;
186395fb5b02SBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu;
186495fb5b02SBen Gardon gfn_t gfn = addr >> PAGE_SHIFT;
18652aa07893SSean Christopherson int leaf = -1;
186695fb5b02SBen Gardon 
1867a972e29cSPaolo Bonzini *root_level = vcpu->arch.mmu->root_role.level;
186895fb5b02SBen Gardon 
186995fb5b02SBen Gardon tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
187095fb5b02SBen Gardon leaf = iter.level;
1871dde81f94SSean Christopherson sptes[leaf] = iter.old_spte;
187295fb5b02SBen Gardon }
187395fb5b02SBen Gardon 
187495fb5b02SBen Gardon return leaf;
187595fb5b02SBen Gardon }
18766e8eb206SDavid Matlack 
18776e8eb206SDavid Matlack /*
18786e8eb206SDavid Matlack * Returns the last level spte pointer of the shadow page walk for the given
18796e8eb206SDavid Matlack * gpa, and sets *spte to the spte value. This spte may be non-present. If no
18806e8eb206SDavid Matlack * walk could be performed, returns NULL and *spte does not contain valid data.
18816e8eb206SDavid Matlack *
18826e8eb206SDavid Matlack * Contract:
18836e8eb206SDavid Matlack * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
18846e8eb206SDavid Matlack * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
18856e8eb206SDavid Matlack *
18866e8eb206SDavid Matlack * WARNING: This function is only intended to be called during fast_page_fault.
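 *
 * Sketch of the expected calling pattern (illustrative; fast_page_fault()
 * in mmu.c is the intended user):
 *
 *	u64 spte;
 *	u64 *sptep;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
 *	if (sptep)
 *		attempt the fast fixup on *sptep, per the contract above;
 *	kvm_tdp_mmu_walk_lockless_end();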
18876e8eb206SDavid Matlack */ 18886e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, 18896e8eb206SDavid Matlack u64 *spte) 18906e8eb206SDavid Matlack { 18916e8eb206SDavid Matlack struct tdp_iter iter; 18926e8eb206SDavid Matlack struct kvm_mmu *mmu = vcpu->arch.mmu; 18936e8eb206SDavid Matlack gfn_t gfn = addr >> PAGE_SHIFT; 18946e8eb206SDavid Matlack tdp_ptep_t sptep = NULL; 18956e8eb206SDavid Matlack 18966e8eb206SDavid Matlack tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { 18976e8eb206SDavid Matlack *spte = iter.old_spte; 18986e8eb206SDavid Matlack sptep = iter.sptep; 18996e8eb206SDavid Matlack } 19006e8eb206SDavid Matlack 19016e8eb206SDavid Matlack /* 19026e8eb206SDavid Matlack * Perform the rcu_dereference to get the raw spte pointer value since 19036e8eb206SDavid Matlack * we are passing it up to fast_page_fault, which is shared with the 19046e8eb206SDavid Matlack * legacy MMU and thus does not retain the TDP MMU-specific __rcu 19056e8eb206SDavid Matlack * annotation. 19066e8eb206SDavid Matlack * 19076e8eb206SDavid Matlack * This is safe since fast_page_fault obeys the contracts of this 19086e8eb206SDavid Matlack * function as well as all TDP MMU contracts around modifying SPTEs 19096e8eb206SDavid Matlack * outside of mmu_lock. 19106e8eb206SDavid Matlack */ 19116e8eb206SDavid Matlack return rcu_dereference(sptep); 19126e8eb206SDavid Matlack } 1913