// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        struct workqueue_struct *wq;

        if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
                return 0;

        wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
        if (!wq)
                return -ENOMEM;

        /* This should not be changed for the lifetime of the VM. */
        kvm->arch.tdp_mmu_enabled = true;
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
        kvm->arch.tdp_mmu_zap_wq = wq;
        return 1;
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
                                                             bool shared)
{
        if (shared)
                lockdep_assert_held_read(&kvm->mmu_lock);
        else
                lockdep_assert_held_write(&kvm->mmu_lock);

        return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        if (!kvm->arch.tdp_mmu_enabled)
                return;

        /* Also waits for any queued work items. */
        destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);

        WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

        /*
         * Ensure that all the outstanding RCU callbacks to free shadow pages
         * can run before the VM is torn down. Work items on tdp_mmu_zap_wq
         * can call kvm_tdp_mmu_put_root() and create new callbacks.
         */
        rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
        free_page((unsigned long)sp->spt);
        kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless walkers are
 * guaranteed not to use the memory after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
        struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
                                               rcu_head);

        tdp_mmu_free_sp(sp);
}
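/*
 * Illustration (a sketch only, not code from this file): the contract above
 * means every lockless walker brackets its walk in an RCU read-side critical
 * section, e.g.:
 *
 *      rcu_read_lock();
 *      tdp_root_for_each_pte(iter, root, start, end)
 *              ...;            // page table memory cannot be freed here
 *      rcu_read_unlock();      // a grace period may now elapse, after which
 *                              // tdp_mmu_free_sp_rcu_callback() can run
 */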
static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
                             bool shared);

static void tdp_mmu_zap_root_work(struct work_struct *work)
{
        struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
                                                 tdp_mmu_async_work);
        struct kvm *kvm = root->tdp_mmu_async_data;

        read_lock(&kvm->mmu_lock);

        /*
         * A TLB flush is not necessary as KVM performs a local TLB flush when
         * allocating a new root (see kvm_mmu_load()), and when migrating a
         * vCPU to a different pCPU. Note, the local TLB flush on reuse also
         * invalidates any paging-structure-cache entries, i.e. TLB entries for
         * intermediate paging structures, that may be zapped, as such entries
         * are associated with the ASID on both VMX and SVM.
         */
        tdp_mmu_zap_root(kvm, root, true);

        /*
         * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
         * avoiding an infinite loop. By design, the root is reachable while
         * it's being asynchronously zapped, thus a different task can put its
         * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
         * asynchronously zapped root is unavoidable.
         */
        kvm_tdp_mmu_put_root(kvm, root, true);

        read_unlock(&kvm->mmu_lock);
}

static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        root->tdp_mmu_async_data = kvm;
        INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
        queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}

static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
{
        union kvm_mmu_page_role role = page->role;
        role.invalid = true;

        /* No need to use cmpxchg, only the invalid bit can change. */
        role.word = xchg(&page->role.word, role.word);
        return role.invalid;
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
{
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);

        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
                return;

        WARN_ON(!root->tdp_mmu_page);

        /*
         * The root now has refcount=0. It is valid, but readers already
         * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
         * rejects it. This remains true for the rest of the execution
         * of this function, because readers visit valid roots only
         * (except for tdp_mmu_zap_root_work(), which however
         * does not acquire any reference itself).
         *
         * Even though there are flows that need to visit all roots for
         * correctness, they all take mmu_lock for write, so they cannot yet
         * run concurrently. The same is true after kvm_tdp_root_mark_invalid(),
         * since the root still has refcount=0.
         *
         * However, tdp_mmu_zap_root() can yield, and writers do not expect to
         * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
         * So the root temporarily gets an extra reference, going to refcount=1
         * while staying invalid. Readers still cannot acquire any reference;
         * but writers are now allowed to run if tdp_mmu_zap_root() yields and
         * they might take an extra reference if they themselves yield.
         * Therefore, when the reference is given back by the worker,
         * there is no guarantee that the refcount is still 1. If not, whoever
         * puts the last reference will free the page, but they will not have
         * to zap the root because a root cannot go from invalid to valid.
         */
        if (!kvm_tdp_root_mark_invalid(root)) {
                refcount_set(&root->tdp_mmu_root_count, 1);

                /*
                 * Zapping the root in a worker is not just "nice to have";
                 * it is required because kvm_tdp_mmu_invalidate_all_roots()
                 * skips already-invalid roots. If kvm_tdp_mmu_put_root() did
                 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
                 * might return with some roots not zapped yet.
                 */
                tdp_mmu_schedule_zap_root(kvm, root);
                return;
        }

        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
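/*
 * Sketch of the root lifecycle implemented above (a summary for the reader,
 * not upstream documentation):
 *
 *      valid, refcount > 0
 *          --final put-->  invalid, refcount = 1, zap queued on the workqueue
 *      invalid, refcount > 0
 *          --final put-->  unlinked from tdp_mmu_roots, freed after an RCU
 *                          grace period
 *
 * A root never goes from invalid back to valid, which is why the second
 * final put can skip zapping entirely.
 */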
/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL). A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
                                              bool shared, bool only_valid)
{
        struct kvm_mmu_page *next_root;

        rcu_read_lock();

        if (prev_root)
                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                  &prev_root->link,
                                                  typeof(*prev_root), link);
        else
                next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                                   typeof(*next_root), link);

        while (next_root) {
                if ((!only_valid || !next_root->role.invalid) &&
                    kvm_tdp_mmu_get_root(next_root))
                        break;

                next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
                                &next_root->link, typeof(*next_root), link);
        }

        rcu_read_unlock();

        if (prev_root)
                kvm_tdp_mmu_put_root(kvm, prev_root, shared);

        return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
        for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);       \
             _root;                                                             \
             _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))      \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&          \
                    kvm_mmu_page_as_id(_root) != _as_id) {                      \
                } else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)    \
        __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)                   \
        __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
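/*
 * Illustrative caller of the yield-safe iterator (a sketch modeled on
 * kvm_tdp_mmu_zap_leafs() later in this file, not additional upstream code):
 *
 *      struct kvm_mmu_page *root;
 *
 *      for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 *              flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 *
 * Breaking out of such a loop early must be paired with
 * kvm_tdp_mmu_put_root() on the current root, per the note above.
 */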
/*
 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                      \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&    \
                    kvm_mmu_page_as_id(_root) != _as_id) {              \
                } else

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *sp;

        sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

        return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
                            gfn_t gfn, union kvm_mmu_page_role role)
{
        INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

        sp->role = role;
        sp->gfn = gfn;
        sp->ptep = sptep;
        sp->tdp_mmu_page = true;

        trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
                                  struct tdp_iter *iter)
{
        struct kvm_mmu_page *parent_sp;
        union kvm_mmu_page_role role;

        parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

        role = parent_sp->role;
        role.level--;

        tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
        union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        lockdep_assert_held_write(&kvm->mmu_lock);

        /*
         * Check for an existing root before allocating a new one. Note, the
         * role check prevents consuming an invalid root.
         */
        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word &&
                    kvm_tdp_mmu_get_root(root))
                        goto out;
        }

        root = tdp_mmu_alloc_sp(vcpu);
        tdp_mmu_init_sp(root, NULL, 0, role);

        refcount_set(&root->tdp_mmu_root_count, 1);

        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
        return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
        if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
                return;

        if (is_accessed_spte(old_spte) &&
            (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
             spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
                                          u64 old_spte, u64 new_spte, int level)
{
        bool pfn_changed;
        struct kvm_memory_slot *slot;

        if (level > PG_LEVEL_4K)
                return;

        pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        if ((!is_writable_pte(old_spte) || pfn_changed) &&
            is_writable_pte(new_spte)) {
                slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
                mark_page_dirty_in_slot(kvm, slot, gfn);
        }
}

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        kvm_account_pgtable_pages((void *)sp->spt, +1);
        atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        kvm_account_pgtable_pages((void *)sp->spt, -1);
        atomic64_dec(&kvm->arch.tdp_mmu_pages);
}
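/*
 * Every shadow page linked into the TDP paging structure passes through
 * tdp_account_mmu_page(), and every page removed from it through
 * tdp_unaccount_mmu_page(), so kvm->arch.tdp_mmu_pages must drop back to
 * zero by teardown; that is what the WARN_ON() in kvm_mmu_uninit_tdp_mmu()
 * checks.
 */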
/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *          the MMU lock and the operation must synchronize with other
 *          threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
                              bool shared)
{
        tdp_unaccount_mmu_page(kvm, sp);

        if (!sp->nx_huge_page_disallowed)
                return;

        if (shared)
                spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        else
                lockdep_assert_held_write(&kvm->mmu_lock);

        sp->nx_huge_page_disallowed = false;
        untrack_possible_nx_huge_page(kvm, sp);

        if (shared)
                spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *          of the MMU lock and the operation must synchronize with other
 *          threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
        int i;

        trace_kvm_mmu_prepare_zap_page(sp);

        tdp_mmu_unlink_sp(kvm, sp, shared);

        for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
                tdp_ptep_t sptep = pt + i;
                gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
                u64 old_spte;

                if (shared) {
                        /*
                         * Set the SPTE to a nonpresent value that other
                         * threads will not overwrite.
                         * If the SPTE was already marked as removed, then
                         * another thread handling a page fault could overwrite
                         * it, so retry setting the SPTE until it is
                         * successfully set from some other value to the
                         * removed SPTE value.
                         */
                        for (;;) {
                                old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
                                if (!is_removed_spte(old_spte))
                                        break;
                                cpu_relax();
                        }
                } else {
                        /*
                         * If the SPTE is not MMU-present, there is no backing
                         * page associated with the SPTE and so no side effects
                         * that need to be recorded, and exclusive ownership of
                         * mmu_lock ensures the SPTE can't be made present.
                         * Note, zapping MMIO SPTEs is also unnecessary as they
                         * are guarded by the memslots generation, not by being
                         * unreachable.
                         */
                        old_spte = kvm_tdp_mmu_read_spte(sptep);
                        if (!is_shadow_present_pte(old_spte))
                                continue;

                        /*
                         * Use the common helper instead of a raw WRITE_ONCE as
                         * the SPTE needs to be updated atomically if it can be
                         * modified by a different vCPU outside of mmu_lock.
                         * Even though the parent SPTE is !PRESENT, the TLB
                         * hasn't yet been flushed, and both Intel and AMD
                         * document that A/D assists can use upper-level PxE
                         * entries that are cached in the TLB, i.e. the CPU can
                         * still access the page and mark it dirty.
                         *
                         * No retry is needed in the atomic update path as the
                         * sole concern is dropping a Dirty bit, i.e. no other
                         * task can zap/remove the SPTE as mmu_lock is held for
                         * write. Marking the SPTE as a removed SPTE is not
                         * strictly necessary for the same reason, but using
                         * the removed SPTE value keeps the shared/exclusive
                         * paths consistent and allows the handle_changed_spte()
                         * call below to hardcode the new value to REMOVED_SPTE.
                         *
                         * Note, even though dropping a Dirty bit is the only
                         * scenario where a non-atomic update could result in a
                         * functional bug, simply checking the Dirty bit isn't
                         * sufficient as a fast page fault could read the upper
                         * level SPTE before it is zapped, and then make this
                         * target SPTE writable, resume the guest, and set the
                         * Dirty bit between reading the SPTE above and writing
                         * it here.
                         */
                        old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
                                                          REMOVED_SPTE, level);
                }
                handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
                                    old_spte, REMOVED_SPTE, level, shared);
        }

        call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *          the MMU lock and the operation must synchronize with other
 *          threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                  u64 old_spte, u64 new_spte, int level,
                                  bool shared)
{
        bool was_present = is_shadow_present_pte(old_spte);
        bool is_present = is_shadow_present_pte(new_spte);
        bool was_leaf = was_present && is_last_spte(old_spte, level);
        bool is_leaf = is_present && is_last_spte(new_spte, level);
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        WARN_ON(level > PT64_ROOT_MAX_LEVEL);
        WARN_ON(level < PG_LEVEL_4K);
        WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

        /*
         * If this warning were to trigger it would indicate that there was a
         * missing MMU notifier or a race with some notifier handler.
         * A present, leaf SPTE should never be directly replaced with another
         * present leaf SPTE pointing to a different PFN.
         * A notifier handler should be zapping the SPTE before the main MM's
         * page table is changed, or the SPTE should be zeroed, and the TLBs
         * flushed by the thread before replacement.
         */
        if (was_leaf && is_leaf && pfn_changed) {
                pr_err("Invalid SPTE change: cannot replace a present leaf\n"
                       "SPTE with another present leaf SPTE mapping a\n"
                       "different PFN!\n"
                       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                       as_id, gfn, old_spte, new_spte, level);

                /*
                 * Crash the host to prevent error propagation and guest data
                 * corruption.
                 */
                BUG();
        }

        if (old_spte == new_spte)
                return;

        trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

        if (is_leaf)
                check_spte_writable_invariants(new_spte);

        /*
         * The only times a SPTE should be changed from a non-present to
         * non-present state is when an MMIO entry is installed/modified/
         * removed. In that case, there is nothing to do here.
         */
        if (!was_present && !is_present) {
                /*
                 * If this change does not involve a MMIO SPTE or removed SPTE,
                 * it is unexpected. Log the change, though it should not
                 * impact the guest since both the former and current SPTEs
                 * are nonpresent.
                 */
                if (WARN_ON(!is_mmio_spte(old_spte) &&
                            !is_mmio_spte(new_spte) &&
                            !is_removed_spte(new_spte)))
                        pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
                               "should not be replaced with another,\n"
                               "different nonpresent SPTE, unless one or both\n"
                               "are MMIO SPTEs, or the new SPTE is\n"
                               "a temporary removed SPTE.\n"
                               "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                               as_id, gfn, old_spte, new_spte, level);
                return;
        }

        if (is_leaf != was_leaf)
                kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

        if (was_leaf && is_dirty_spte(old_spte) &&
            (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));

        /*
         * Recursively handle child PTs if the change removed a subtree from
         * the paging structure. Note the WARN on the PFN changing without the
         * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
         * pages are kernel allocations and should never be migrated.
         */
        if (was_present && !was_leaf &&
            (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
                handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared)
{
        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
                              shared);
        handle_changed_spte_acc_track(old_spte, new_spte, level);
        handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
                                      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping. Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                          struct tdp_iter *iter,
                                          u64 new_spte)
{
        u64 *sptep = rcu_dereference(iter->sptep);

        /*
         * The caller is responsible for ensuring the old SPTE is not a REMOVED
         * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
         * and pre-checking before inserting a new SPTE is advantageous as it
         * avoids unnecessary work.
         */
        WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));

        lockdep_assert_held_read(&kvm->mmu_lock);

        /*
         * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
         * does not hold the mmu_lock.
         */
        if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
                return -EBUSY;

        __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
                              new_spte, iter->level, true);
        handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

        return 0;
}
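/*
 * Typical caller pattern (a sketch mirroring __tdp_mmu_zap_root() below, not
 * additional upstream code):
 *
 * retry:
 *      if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *              goto retry;     // -EBUSY: iter.old_spte was refreshed,
 *                              // recompute new_spte (if needed) and try again
 */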
static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
                                          struct tdp_iter *iter)
{
        int ret;

        /*
         * Freeze the SPTE by setting it to a special,
         * non-present value. This will stop other threads from
         * immediately installing a present entry in its place
         * before the TLBs are flushed.
         */
        ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
        if (ret)
                return ret;

        kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
                                           KVM_PAGES_PER_HPAGE(iter->level));

        /*
         * No other thread can overwrite the removed SPTE as they must either
         * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
         * overwrite the special removed SPTE value. No bookkeeping is needed
         * here since the SPTE is going from non-present to non-present. Use
         * the raw write helper to avoid an unnecessary check on volatile bits.
         */
        __kvm_tdp_mmu_write_spte(iter->sptep, 0);

        return 0;
}


/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:              KVM instance
 * @as_id:            Address space ID, i.e. regular vs. SMM
 * @sptep:            Pointer to the SPTE
 * @old_spte:         The current value of the SPTE
 * @new_spte:         The new value that will be set for the SPTE
 * @gfn:              The base GFN that was (or will be) mapped by the SPTE
 * @level:            The level _containing_ the SPTE (its parent PT's level)
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *                    of the page. Should be set unless handling an MMU
 *                    notifier for access tracking. Leaving record_acc_track
 *                    unset in that case prevents page accesses from being
 *                    double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *                    appropriate for the change being made. Should be set
 *                    unless performing certain dirty logging operations.
 *                    Leaving record_dirty_log unset in that case prevents page
 *                    writes from being double counted.
 *
 * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits.
 */
static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
                              u64 old_spte, u64 new_spte, gfn_t gfn, int level,
                              bool record_acc_track, bool record_dirty_log)
{
        lockdep_assert_held_write(&kvm->mmu_lock);

        /*
         * No thread should be using this function to set SPTEs to or from the
         * temporary removed SPTE value.
         * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
         * should be used. If operating under the MMU lock in write mode, the
         * use of the removed SPTE should not be necessary.
         */
        WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));

        old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

        if (record_acc_track)
                handle_changed_spte_acc_track(old_spte, new_spte, level);
        if (record_dirty_log)
                handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
                                              new_spte, level);
        return old_spte;
}

static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                     u64 new_spte, bool record_acc_track,
                                     bool record_dirty_log)
{
        WARN_ON_ONCE(iter->yielded);

        iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
                                            iter->old_spte, new_spte,
                                            iter->gfn, iter->level,
                                            record_acc_track, record_dirty_log);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                    u64 new_spte)
{
        _tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        _tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        _tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}
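/*
 * At a glance, the wrappers above map to __tdp_mmu_set_spte() flags as
 * follows (a summary of the code, not new behavior):
 *
 *      tdp_mmu_set_spte()              acc_track = true,  dirty_log = true
 *      tdp_mmu_set_spte_no_acc_track() acc_track = false, dirty_log = true
 *      tdp_mmu_set_spte_no_dirty_log() acc_track = true,  dirty_log = false
 */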
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
        for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
        tdp_root_for_each_pte(_iter, _root, _start, _end)       \
                if (!is_shadow_present_pte(_iter.old_spte) ||   \
                    !is_last_spte(_iter.old_spte, _iter.level)) \
                        continue;                               \
                else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)         \
        for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
                                                          struct tdp_iter *iter,
                                                          bool flush, bool shared)
{
        WARN_ON(iter->yielded);

        /* Ensure forward progress has been made before yielding. */
        if (iter->next_last_level_gfn == iter->yielded_gfn)
                return false;

        if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                if (flush)
                        kvm_flush_remote_tlbs(kvm);

                rcu_read_unlock();

                if (shared)
                        cond_resched_rwlock_read(&kvm->mmu_lock);
                else
                        cond_resched_rwlock_write(&kvm->mmu_lock);

                rcu_read_lock();

                WARN_ON(iter->gfn > iter->next_last_level_gfn);

                iter->yielded = true;
        }

        return iter->yielded;
}
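/*
 * Illustrative use (a sketch modeled on tdp_mmu_zap_leafs() below): callers
 * must honor a yield by restarting the loop body, e.g.:
 *
 *      for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
 *              if (can_yield &&
 *                  tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 *                      flush = false;  // the yield already flushed
 *                      continue;       // iter was reset, resume the walk
 *              }
 *              ...
 *      }
 */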
KVM disallows memslots with 85386931ff7SSean Christopherson * a gpa range that would exceed the max gfn, and KVM does not create 85486931ff7SSean Christopherson * MMIO SPTEs for "impossible" gfns, instead sending such accesses down 85586931ff7SSean Christopherson * the slow emulation path every time. 856e2b5b21dSSean Christopherson */ 85786931ff7SSean Christopherson return kvm_mmu_max_gfn() + 1; 858e2b5b21dSSean Christopherson } 859e2b5b21dSSean Christopherson 8601b6043e8SSean Christopherson static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, 8611b6043e8SSean Christopherson bool shared, int zap_level) 862e2b5b21dSSean Christopherson { 863e2b5b21dSSean Christopherson struct tdp_iter iter; 864e2b5b21dSSean Christopherson 86586931ff7SSean Christopherson gfn_t end = tdp_mmu_max_gfn_exclusive(); 866e2b5b21dSSean Christopherson gfn_t start = 0; 867e2b5b21dSSean Christopherson 8681b6043e8SSean Christopherson for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { 8691b6043e8SSean Christopherson retry: 8701b6043e8SSean Christopherson if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) 8711b6043e8SSean Christopherson continue; 8721b6043e8SSean Christopherson 8731b6043e8SSean Christopherson if (!is_shadow_present_pte(iter.old_spte)) 8741b6043e8SSean Christopherson continue; 8751b6043e8SSean Christopherson 8761b6043e8SSean Christopherson if (iter.level > zap_level) 8771b6043e8SSean Christopherson continue; 8781b6043e8SSean Christopherson 8791b6043e8SSean Christopherson if (!shared) 8801b6043e8SSean Christopherson tdp_mmu_set_spte(kvm, &iter, 0); 8811b6043e8SSean Christopherson else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) 8821b6043e8SSean Christopherson goto retry; 8831b6043e8SSean Christopherson } 8841b6043e8SSean Christopherson } 8851b6043e8SSean Christopherson 8861b6043e8SSean Christopherson static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, 8871b6043e8SSean Christopherson bool shared) 8881b6043e8SSean Christopherson { 8891b6043e8SSean Christopherson 8908351779cSPaolo Bonzini /* 8918351779cSPaolo Bonzini * The root must have an elevated refcount so that it's reachable via 8928351779cSPaolo Bonzini * mmu_notifier callbacks, which allows this path to yield and drop 8938351779cSPaolo Bonzini * mmu_lock. When handling an unmap/release mmu_notifier command, KVM 8948351779cSPaolo Bonzini * must drop all references to relevant pages prior to completing the 8958351779cSPaolo Bonzini * callback. Dropping mmu_lock with an unreachable root would result 8968351779cSPaolo Bonzini * in zapping SPTEs after a relevant mmu_notifier callback completes 8978351779cSPaolo Bonzini * and lead to use-after-free as zapping a SPTE triggers "writeback" of 8988351779cSPaolo Bonzini * dirty accessed bits to the SPTE's associated struct page. 8998351779cSPaolo Bonzini */ 9008351779cSPaolo Bonzini WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count)); 9018351779cSPaolo Bonzini 902e2b5b21dSSean Christopherson kvm_lockdep_assert_mmu_lock_held(kvm, shared); 903e2b5b21dSSean Christopherson 904e2b5b21dSSean Christopherson rcu_read_lock(); 905e2b5b21dSSean Christopherson 906e2b5b21dSSean Christopherson /* 9071b6043e8SSean Christopherson * To avoid RCU stalls due to recursively removing huge swaths of SPs, 9081b6043e8SSean Christopherson * split the zap into two passes. On the first pass, zap at the 1gb 9091b6043e8SSean Christopherson * level, and then zap top-level SPs on the second pass. 
"1gb" is not 9101b6043e8SSean Christopherson * arbitrary, as KVM must be able to zap a 1gb shadow page without 9111b6043e8SSean Christopherson * inducing a stall to allow in-place replacement with a 1gb hugepage. 9121b6043e8SSean Christopherson * 9131b6043e8SSean Christopherson * Because zapping a SP recurses on its children, stepping down to 9141b6043e8SSean Christopherson * PG_LEVEL_4K in the iterator itself is unnecessary. 915e2b5b21dSSean Christopherson */ 9161b6043e8SSean Christopherson __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); 9171b6043e8SSean Christopherson __tdp_mmu_zap_root(kvm, root, shared, root->role.level); 918e2b5b21dSSean Christopherson 919e2b5b21dSSean Christopherson rcu_read_unlock(); 920e2b5b21dSSean Christopherson } 921e2b5b21dSSean Christopherson 922c10743a1SSean Christopherson bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) 923c10743a1SSean Christopherson { 924c10743a1SSean Christopherson u64 old_spte; 925c10743a1SSean Christopherson 926c10743a1SSean Christopherson /* 927c10743a1SSean Christopherson * This helper intentionally doesn't allow zapping a root shadow page, 928c10743a1SSean Christopherson * which doesn't have a parent page table and thus no associated entry. 929c10743a1SSean Christopherson */ 930c10743a1SSean Christopherson if (WARN_ON_ONCE(!sp->ptep)) 931c10743a1SSean Christopherson return false; 932c10743a1SSean Christopherson 933c10743a1SSean Christopherson old_spte = kvm_tdp_mmu_read_spte(sp->ptep); 934bb95dfb9SSean Christopherson if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) 935c10743a1SSean Christopherson return false; 936c10743a1SSean Christopherson 937c10743a1SSean Christopherson __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, 938c10743a1SSean Christopherson sp->gfn, sp->role.level + 1, true, true); 939c10743a1SSean Christopherson 940c10743a1SSean Christopherson return true; 941c10743a1SSean Christopherson } 942c10743a1SSean Christopherson 943faaf05b0SBen Gardon /* 944063afacdSBen Gardon * If can_yield is true, will release the MMU lock and reschedule if the 945063afacdSBen Gardon * scheduler needs the CPU or there is contention on the MMU lock. If this 946063afacdSBen Gardon * function cannot yield, it will not release the MMU lock or reschedule and 947063afacdSBen Gardon * the caller must ensure it does not supply too large a GFN range, or the 9486103bc07SBen Gardon * operation can cause a soft lockup. 
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
                              gfn_t start, gfn_t end, bool can_yield, bool flush)
{
        struct tdp_iter iter;

        end = min(end, tdp_mmu_max_gfn_exclusive());

        lockdep_assert_held_write(&kvm->mmu_lock);

        rcu_read_lock();

        for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
                        flush = false;
                        continue;
                }

                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                tdp_mmu_set_spte(kvm, &iter, 0);
                flush = true;
        }

        rcu_read_unlock();

        /*
         * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
         * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
         */
        return flush;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
 * more SPTEs were zapped since the MMU lock was last acquired.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
                           bool can_yield, bool flush)
{
        struct kvm_mmu_page *root;

        for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
                flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);

        return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *root;
        int i;

        /*
         * Zap all roots, including invalid roots, as all SPTEs must be dropped
         * before returning to the caller. Zap directly even if the root is
         * also being zapped by a worker. Walking zapped top-level SPTEs isn't
         * all that expensive and mmu_lock is already held, which means the
         * worker has yielded, i.e. flushing the work instead of zapping here
         * isn't guaranteed to be any faster.
101322b94c4bSPaolo Bonzini 	 *
101477c8cd6bSSean Christopherson 	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
101577c8cd6bSSean Christopherson 	 * is being destroyed or the userspace VMM has exited. In both cases,
101677c8cd6bSSean Christopherson 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
101777c8cd6bSSean Christopherson 	 */
1018e2b5b21dSSean Christopherson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1019e2b5b21dSSean Christopherson 		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
1020e2b5b21dSSean Christopherson 			tdp_mmu_zap_root(kvm, root, false);
1021e2b5b21dSSean Christopherson 	}
1022faaf05b0SBen Gardon }
1023bb18842eSBen Gardon 
10244c6654bdSBen Gardon /*
1025f28e9c7fSSean Christopherson  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
102622b94c4bSPaolo Bonzini  * zap" completes.
10274c6654bdSBen Gardon  */
10284c6654bdSBen Gardon void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
10294c6654bdSBen Gardon {
103022b94c4bSPaolo Bonzini 	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
10314c6654bdSBen Gardon }
10324c6654bdSBen Gardon 
1033bb18842eSBen Gardon /*
1034f28e9c7fSSean Christopherson  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
103522b94c4bSPaolo Bonzini  * is about to be zapped, e.g. in response to a memslots update. The actual
103622b94c4bSPaolo Bonzini  * zapping is performed asynchronously, so a reference is taken on all roots.
103722b94c4bSPaolo Bonzini  * Using a separate workqueue makes it easy to ensure that the destruction is
103822b94c4bSPaolo Bonzini  * performed before the "fast zap" completes, without keeping a separate list
103922b94c4bSPaolo Bonzini  * of invalidated roots; the list is effectively the list of work items in
104022b94c4bSPaolo Bonzini  * the workqueue.
1041b7cccd39SBen Gardon  *
104222b94c4bSPaolo Bonzini  * Get a reference even if the root is already invalid; the asynchronous worker
104322b94c4bSPaolo Bonzini  * assumes it was gifted a reference to the root it processes. Because mmu_lock
104422b94c4bSPaolo Bonzini  * is held for write, it should be impossible to observe a root with zero refcount,
104522b94c4bSPaolo Bonzini  * i.e. the list of roots cannot be stale.
10464c6654bdSBen Gardon  *
1047b7cccd39SBen Gardon  * This has essentially the same effect for the TDP MMU
1048b7cccd39SBen Gardon  * as updating mmu_valid_gen does for the shadow MMU.
1049b7cccd39SBen Gardon  */
1050b7cccd39SBen Gardon void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
1051b7cccd39SBen Gardon {
1052b7cccd39SBen Gardon 	struct kvm_mmu_page *root;
1053b7cccd39SBen Gardon 
1054b7cccd39SBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1055f28e9c7fSSean Christopherson 	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1056efd995daSPaolo Bonzini 		if (!root->role.invalid &&
1057efd995daSPaolo Bonzini 		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
1058b7cccd39SBen Gardon 			root->role.invalid = true;
105922b94c4bSPaolo Bonzini 			tdp_mmu_schedule_zap_root(kvm, root);
106022b94c4bSPaolo Bonzini 		}
1061b7cccd39SBen Gardon 	}
1062f28e9c7fSSean Christopherson }
1063b7cccd39SBen Gardon 
1064bb18842eSBen Gardon /*
1065bb18842eSBen Gardon  * Installs a last-level SPTE to handle a TDP page fault.
1066bb18842eSBen Gardon * (NPT/EPT violation/misconfiguration) 1067bb18842eSBen Gardon */ 1068cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, 1069cdc47767SPaolo Bonzini struct kvm_page_fault *fault, 1070cdc47767SPaolo Bonzini struct tdp_iter *iter) 1071bb18842eSBen Gardon { 1072c435d4b7SSean Christopherson struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); 1073bb18842eSBen Gardon u64 new_spte; 107457a3e96dSKai Huang int ret = RET_PF_FIXED; 1075ad67e480SPaolo Bonzini bool wrprot = false; 1076bb18842eSBen Gardon 10777158bee4SPaolo Bonzini WARN_ON(sp->role.level != fault->goal_level); 1078e710c5f6SDavid Matlack if (unlikely(!fault->slot)) 1079bb18842eSBen Gardon new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); 10809a77daacSBen Gardon else 108153597858SDavid Matlack wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, 10822839180cSPaolo Bonzini fault->pfn, iter->old_spte, fault->prefetch, true, 10837158bee4SPaolo Bonzini fault->map_writable, &new_spte); 1084bb18842eSBen Gardon 1085bb18842eSBen Gardon if (new_spte == iter->old_spte) 1086bb18842eSBen Gardon ret = RET_PF_SPURIOUS; 10873e72c791SDavid Matlack else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) 10889a77daacSBen Gardon return RET_PF_RETRY; 1089bb95dfb9SSean Christopherson else if (is_shadow_present_pte(iter->old_spte) && 1090bb95dfb9SSean Christopherson !is_last_spte(iter->old_spte, iter->level)) 1091bb95dfb9SSean Christopherson kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, 1092bb95dfb9SSean Christopherson KVM_PAGES_PER_HPAGE(iter->level + 1)); 1093bb18842eSBen Gardon 1094bb18842eSBen Gardon /* 1095bb18842eSBen Gardon * If the page fault was caused by a write but the page is write 1096bb18842eSBen Gardon * protected, emulation is needed. If the emulation was skipped, 1097bb18842eSBen Gardon * the vCPU would have the same fault again. 1098bb18842eSBen Gardon */ 1099ad67e480SPaolo Bonzini if (wrprot) { 1100cdc47767SPaolo Bonzini if (fault->write) 1101bb18842eSBen Gardon ret = RET_PF_EMULATE; 1102bb18842eSBen Gardon } 1103bb18842eSBen Gardon 1104bb18842eSBen Gardon /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ 11059a77daacSBen Gardon if (unlikely(is_mmio_spte(new_spte))) { 11061075d41eSSean Christopherson vcpu->stat.pf_mmio_spte_created++; 11079a77daacSBen Gardon trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, 11089a77daacSBen Gardon new_spte); 1109bb18842eSBen Gardon ret = RET_PF_EMULATE; 11103849e092SSean Christopherson } else { 11119a77daacSBen Gardon trace_kvm_mmu_set_spte(iter->level, iter->gfn, 11129a77daacSBen Gardon rcu_dereference(iter->sptep)); 11133849e092SSean Christopherson } 1114bb18842eSBen Gardon 1115bb18842eSBen Gardon return ret; 1116bb18842eSBen Gardon } 1117bb18842eSBen Gardon 1118bb18842eSBen Gardon /* 1119cb00a70bSDavid Matlack * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the 1120cb00a70bSDavid Matlack * provided page table. 11217b7e1ab6SDavid Matlack * 11227b7e1ab6SDavid Matlack * @kvm: kvm instance 11237b7e1ab6SDavid Matlack * @iter: a tdp_iter instance currently on the SPTE that should be set 11247b7e1ab6SDavid Matlack * @sp: The new TDP page table to install. 1125cb00a70bSDavid Matlack * @shared: This operation is running under the MMU lock in read mode. 11267b7e1ab6SDavid Matlack * 11277b7e1ab6SDavid Matlack * Returns: 0 if the new page table was installed. Non-0 if the page table 11287b7e1ab6SDavid Matlack * could not be installed (e.g. 
the atomic compare-exchange failed). 11297b7e1ab6SDavid Matlack */ 1130cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, 113161f94478SSean Christopherson struct kvm_mmu_page *sp, bool shared) 11327b7e1ab6SDavid Matlack { 113354275f74SSean Christopherson u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled()); 1134cb00a70bSDavid Matlack int ret = 0; 11357b7e1ab6SDavid Matlack 1136cb00a70bSDavid Matlack if (shared) { 11377b7e1ab6SDavid Matlack ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); 11387b7e1ab6SDavid Matlack if (ret) 11397b7e1ab6SDavid Matlack return ret; 1140cb00a70bSDavid Matlack } else { 1141cb00a70bSDavid Matlack tdp_mmu_set_spte(kvm, iter, spte); 1142cb00a70bSDavid Matlack } 11437b7e1ab6SDavid Matlack 114443a063caSYosry Ahmed tdp_account_mmu_page(kvm, sp); 11457b7e1ab6SDavid Matlack 11467b7e1ab6SDavid Matlack return 0; 11477b7e1ab6SDavid Matlack } 11487b7e1ab6SDavid Matlack 11497b7e1ab6SDavid Matlack /* 1150bb18842eSBen Gardon * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing 1151bb18842eSBen Gardon * page tables and SPTEs to translate the faulting guest physical address. 1152bb18842eSBen Gardon */ 11532f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) 1154bb18842eSBen Gardon { 1155bb18842eSBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu; 115661f94478SSean Christopherson struct kvm *kvm = vcpu->kvm; 1157bb18842eSBen Gardon struct tdp_iter iter; 115889c0fd49SBen Gardon struct kvm_mmu_page *sp; 1159bb18842eSBen Gardon int ret; 1160bb18842eSBen Gardon 116173a3c659SPaolo Bonzini kvm_mmu_hugepage_adjust(vcpu, fault); 1162bb18842eSBen Gardon 1163f0066d94SPaolo Bonzini trace_kvm_mmu_spte_requested(fault); 11647cca2d0bSBen Gardon 11657cca2d0bSBen Gardon rcu_read_lock(); 11667cca2d0bSBen Gardon 11672f6305ddSPaolo Bonzini tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { 116873a3c659SPaolo Bonzini if (fault->nx_huge_page_workaround_enabled) 1169536f0e6aSPaolo Bonzini disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); 1170bb18842eSBen Gardon 117173a3c659SPaolo Bonzini if (iter.level == fault->goal_level) 1172bb18842eSBen Gardon break; 1173bb18842eSBen Gardon 1174bb18842eSBen Gardon /* 1175bb18842eSBen Gardon * If there is an SPTE mapping a large page at a higher level 1176bb18842eSBen Gardon * than the target, that SPTE must be cleared and replaced 1177bb18842eSBen Gardon * with a non-leaf SPTE. 1178bb18842eSBen Gardon */ 1179bb18842eSBen Gardon if (is_shadow_present_pte(iter.old_spte) && 1180bb18842eSBen Gardon is_large_pte(iter.old_spte)) { 11813e72c791SDavid Matlack if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter)) 11829a77daacSBen Gardon break; 1183bb18842eSBen Gardon 1184bb18842eSBen Gardon /* 1185bb18842eSBen Gardon * The iter must explicitly re-read the spte here 1186bb18842eSBen Gardon * because the new value informs the !present 1187bb18842eSBen Gardon * path below. 1188bb18842eSBen Gardon */ 11890e587aa7SSean Christopherson iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep); 1190bb18842eSBen Gardon } 1191bb18842eSBen Gardon 1192bb18842eSBen Gardon if (!is_shadow_present_pte(iter.old_spte)) { 1193ff76d506SKai Huang /* 1194c4342633SIngo Molnar * If SPTE has been frozen by another thread, just 1195ff76d506SKai Huang * give up and retry, avoiding unnecessary page table 1196ff76d506SKai Huang * allocation and free. 
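 *
 * (A "frozen" SPTE is one that a racing zapper has replaced with the
 * special REMOVED_SPTE marker; the is_removed_spte() check below
 * detects exactly that marker.)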
1197ff76d506SKai Huang */ 1198ff76d506SKai Huang if (is_removed_spte(iter.old_spte)) 1199ff76d506SKai Huang break; 1200ff76d506SKai Huang 1201a82070b6SDavid Matlack sp = tdp_mmu_alloc_sp(vcpu); 1202a82070b6SDavid Matlack tdp_mmu_init_child_sp(sp, &iter); 1203a82070b6SDavid Matlack 120461f94478SSean Christopherson sp->nx_huge_page_disallowed = fault->huge_page_disallowed; 120561f94478SSean Christopherson 120661f94478SSean Christopherson if (tdp_mmu_link_sp(kvm, &iter, sp, true)) { 12079a77daacSBen Gardon tdp_mmu_free_sp(sp); 12089a77daacSBen Gardon break; 12099a77daacSBen Gardon } 121061f94478SSean Christopherson 121161f94478SSean Christopherson if (fault->huge_page_disallowed && 121261f94478SSean Christopherson fault->req_level >= iter.level) { 121361f94478SSean Christopherson spin_lock(&kvm->arch.tdp_mmu_pages_lock); 121461f94478SSean Christopherson track_possible_nx_huge_page(kvm, sp); 121561f94478SSean Christopherson spin_unlock(&kvm->arch.tdp_mmu_pages_lock); 121661f94478SSean Christopherson } 1217bb18842eSBen Gardon } 1218bb18842eSBen Gardon } 1219bb18842eSBen Gardon 122058298b06SSean Christopherson /* 122158298b06SSean Christopherson * Force the guest to retry the access if the upper level SPTEs aren't 122258298b06SSean Christopherson * in place, or if the target leaf SPTE is frozen by another CPU. 122358298b06SSean Christopherson */ 122458298b06SSean Christopherson if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) { 12257cca2d0bSBen Gardon rcu_read_unlock(); 1226bb18842eSBen Gardon return RET_PF_RETRY; 12277cca2d0bSBen Gardon } 1228bb18842eSBen Gardon 1229cdc47767SPaolo Bonzini ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); 12307cca2d0bSBen Gardon rcu_read_unlock(); 1231bb18842eSBen Gardon 1232bb18842eSBen Gardon return ret; 1233bb18842eSBen Gardon } 1234063afacdSBen Gardon 12353039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, 12363039bcc7SSean Christopherson bool flush) 1237063afacdSBen Gardon { 1238f47e5bbbSSean Christopherson return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start, 123983b83a02SSean Christopherson range->end, range->may_block, flush); 12403039bcc7SSean Christopherson } 12413039bcc7SSean Christopherson 12423039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, 12433039bcc7SSean Christopherson struct kvm_gfn_range *range); 12443039bcc7SSean Christopherson 12453039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, 12463039bcc7SSean Christopherson struct kvm_gfn_range *range, 1247c1b91493SSean Christopherson tdp_handler_t handler) 1248063afacdSBen Gardon { 1249063afacdSBen Gardon struct kvm_mmu_page *root; 12503039bcc7SSean Christopherson struct tdp_iter iter; 12513039bcc7SSean Christopherson bool ret = false; 1252063afacdSBen Gardon 1253063afacdSBen Gardon /* 1254e1eed584SSean Christopherson * Don't support rescheduling, none of the MMU notifiers that funnel 1255e1eed584SSean Christopherson * into this helper allow blocking; it'd be dead, wasteful code. 
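 *
 * Handlers are therefore invoked under rcu_read_lock() alone and must
 * not sleep or reschedule.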
1256063afacdSBen Gardon 	 */
12573039bcc7SSean Christopherson 	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1258a151acecSSean Christopherson 		rcu_read_lock();
1259a151acecSSean Christopherson 
12603039bcc7SSean Christopherson 		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
12613039bcc7SSean Christopherson 			ret |= handler(kvm, &iter, range);
1262063afacdSBen Gardon 
12633039bcc7SSean Christopherson 		rcu_read_unlock();
1264a151acecSSean Christopherson 	}
1265063afacdSBen Gardon 
1266063afacdSBen Gardon 	return ret;
1267063afacdSBen Gardon }
1268063afacdSBen Gardon 
1269f8e14497SBen Gardon /*
1270f8e14497SBen Gardon  * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1271f8e14497SBen Gardon  * non-zero if any of the GFNs in the range have been accessed.
1272f8e14497SBen Gardon  */
12733039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
12743039bcc7SSean Christopherson 			  struct kvm_gfn_range *range)
1275f8e14497SBen Gardon {
1276f8e14497SBen Gardon 	u64 new_spte = 0;
1277f8e14497SBen Gardon 
12783039bcc7SSean Christopherson 	/* If we have a non-accessed entry we don't need to change the pte. */
12793039bcc7SSean Christopherson 	if (!is_accessed_spte(iter->old_spte))
12803039bcc7SSean Christopherson 		return false;
12817cca2d0bSBen Gardon 
12823039bcc7SSean Christopherson 	new_spte = iter->old_spte;
1283f8e14497SBen Gardon 
1284f8e14497SBen Gardon 	if (spte_ad_enabled(new_spte)) {
12858f8f52a4SSean Christopherson 		new_spte &= ~shadow_accessed_mask;
1286f8e14497SBen Gardon 	} else {
1287f8e14497SBen Gardon 		/*
1288f8e14497SBen Gardon 		 * Capture the dirty status of the page, so that it doesn't get
1289f8e14497SBen Gardon 		 * lost when the SPTE is marked for access tracking.
1290f8e14497SBen Gardon 		 */
1291f8e14497SBen Gardon 		if (is_writable_pte(new_spte))
1292f8e14497SBen Gardon 			kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1293f8e14497SBen Gardon 
1294f8e14497SBen Gardon 		new_spte = mark_spte_for_access_track(new_spte);
1295f8e14497SBen Gardon 	}
1296f8e14497SBen Gardon 
12973039bcc7SSean Christopherson 	tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
129833dd3574SBen Gardon 
12993039bcc7SSean Christopherson 	return true;
1300f8e14497SBen Gardon }
1301f8e14497SBen Gardon 
13023039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1303f8e14497SBen Gardon {
13043039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1305f8e14497SBen Gardon }
1306f8e14497SBen Gardon 
13073039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
13083039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
1309f8e14497SBen Gardon {
13103039bcc7SSean Christopherson 	return is_accessed_spte(iter->old_spte);
1311f8e14497SBen Gardon }
1312f8e14497SBen Gardon 
13133039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1314f8e14497SBen Gardon {
13153039bcc7SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
13163039bcc7SSean Christopherson }
13173039bcc7SSean Christopherson 
13183039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
13193039bcc7SSean Christopherson 			 struct kvm_gfn_range *range)
13203039bcc7SSean Christopherson {
13213039bcc7SSean Christopherson 	u64 new_spte;
13223039bcc7SSean Christopherson 
13233039bcc7SSean Christopherson 	/* Huge pages aren't expected to be modified without first being zapped.
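	 *
	 * The WARN_ON below enforces that assumption: the notifier must hand
	 * this helper a 4K pte covering exactly one GFN.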
	 */
13243039bcc7SSean Christopherson 	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
13253039bcc7SSean Christopherson 
13263039bcc7SSean Christopherson 	if (iter->level != PG_LEVEL_4K ||
13273039bcc7SSean Christopherson 	    !is_shadow_present_pte(iter->old_spte))
13283039bcc7SSean Christopherson 		return false;
13293039bcc7SSean Christopherson 
13303039bcc7SSean Christopherson 	/*
13313039bcc7SSean Christopherson 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
13323039bcc7SSean Christopherson 	 * zero the SPTE before setting the new PFN, but doing so preserves the
13333039bcc7SSean Christopherson 	 * invariant that the PFN of a present leaf SPTE can never change.
13343039bcc7SSean Christopherson 	 * See __handle_changed_spte().
13353039bcc7SSean Christopherson 	 */
13363039bcc7SSean Christopherson 	tdp_mmu_set_spte(kvm, iter, 0);
13373039bcc7SSean Christopherson 
13383039bcc7SSean Christopherson 	if (!pte_write(range->pte)) {
13393039bcc7SSean Christopherson 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
13403039bcc7SSean Christopherson 								  pte_pfn(range->pte));
13413039bcc7SSean Christopherson 
13423039bcc7SSean Christopherson 		tdp_mmu_set_spte(kvm, iter, new_spte);
13433039bcc7SSean Christopherson 	}
13443039bcc7SSean Christopherson 
13453039bcc7SSean Christopherson 	return true;
1346f8e14497SBen Gardon }
13471d8dd6b3SBen Gardon 
13481d8dd6b3SBen Gardon /*
13491d8dd6b3SBen Gardon  * Handle the changed_pte MMU notifier for the TDP MMU.
13501d8dd6b3SBen Gardon  * range->pte holds the new pte_t mapping the HVA specified by the MMU
13511d8dd6b3SBen Gardon  * notifier.
13521d8dd6b3SBen Gardon  * Returns true if a flush is needed before releasing the MMU lock.
13531d8dd6b3SBen Gardon  */
13543039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
13551d8dd6b3SBen Gardon {
135693fa50f6SSean Christopherson 	/*
135793fa50f6SSean Christopherson 	 * No need to handle the remote TLB flush under RCU protection, the
135893fa50f6SSean Christopherson 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
135993fa50f6SSean Christopherson 	 * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
136093fa50f6SSean Christopherson 	 */
136193fa50f6SSean Christopherson 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
13621d8dd6b3SBen Gardon }
13631d8dd6b3SBen Gardon 
1364a6a0b05dSBen Gardon /*
1365bedd9195SDavid Matlack  * Remove write access from all SPTEs at or above min_level that map GFNs
1366bedd9195SDavid Matlack  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1367bedd9195SDavid Matlack  * be flushed.
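 *
 * Illustrative call (sketch), as issued by kvm_tdp_mmu_wrprot_slot()
 * below with min_level = PG_LEVEL_4K:
 *
 *	spte_set = wrprot_gfn_range(kvm, root, slot->base_gfn,
 *				    slot->base_gfn + slot->npages,
 *				    PG_LEVEL_4K);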
1368a6a0b05dSBen Gardon */ 1369a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1370a6a0b05dSBen Gardon gfn_t start, gfn_t end, int min_level) 1371a6a0b05dSBen Gardon { 1372a6a0b05dSBen Gardon struct tdp_iter iter; 1373a6a0b05dSBen Gardon u64 new_spte; 1374a6a0b05dSBen Gardon bool spte_set = false; 1375a6a0b05dSBen Gardon 13767cca2d0bSBen Gardon rcu_read_lock(); 13777cca2d0bSBen Gardon 1378a6a0b05dSBen Gardon BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 1379a6a0b05dSBen Gardon 138077aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, start, end) { 138124ae4cfaSBen Gardon retry: 138224ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 13831af4a960SBen Gardon continue; 13841af4a960SBen Gardon 1385a6a0b05dSBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 13860f99ee2cSBen Gardon !is_last_spte(iter.old_spte, iter.level) || 13870f99ee2cSBen Gardon !(iter.old_spte & PT_WRITABLE_MASK)) 1388a6a0b05dSBen Gardon continue; 1389a6a0b05dSBen Gardon 1390a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1391a6a0b05dSBen Gardon 13923e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 139324ae4cfaSBen Gardon goto retry; 13943255530aSDavid Matlack 1395a6a0b05dSBen Gardon spte_set = true; 1396a6a0b05dSBen Gardon } 13977cca2d0bSBen Gardon 13987cca2d0bSBen Gardon rcu_read_unlock(); 1399a6a0b05dSBen Gardon return spte_set; 1400a6a0b05dSBen Gardon } 1401a6a0b05dSBen Gardon 1402a6a0b05dSBen Gardon /* 1403a6a0b05dSBen Gardon * Remove write access from all the SPTEs mapping GFNs in the memslot. Will 1404a6a0b05dSBen Gardon * only affect leaf SPTEs down to min_level. 1405a6a0b05dSBen Gardon * Returns true if an SPTE has been changed and the TLBs need to be flushed. 
1406a6a0b05dSBen Gardon */ 1407269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, 1408269e9552SHamza Mahfooz const struct kvm_memory_slot *slot, int min_level) 1409a6a0b05dSBen Gardon { 1410a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1411a6a0b05dSBen Gardon bool spte_set = false; 1412a6a0b05dSBen Gardon 141324ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1414a6a0b05dSBen Gardon 1415d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1416a6a0b05dSBen Gardon spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, 1417a6a0b05dSBen Gardon slot->base_gfn + slot->npages, min_level); 1418a6a0b05dSBen Gardon 1419a6a0b05dSBen Gardon return spte_set; 1420a6a0b05dSBen Gardon } 1421a6a0b05dSBen Gardon 1422a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) 1423a3fe5dbdSDavid Matlack { 1424a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1425a3fe5dbdSDavid Matlack 1426a3fe5dbdSDavid Matlack gfp |= __GFP_ZERO; 1427a3fe5dbdSDavid Matlack 1428a3fe5dbdSDavid Matlack sp = kmem_cache_alloc(mmu_page_header_cache, gfp); 1429a3fe5dbdSDavid Matlack if (!sp) 1430a3fe5dbdSDavid Matlack return NULL; 1431a3fe5dbdSDavid Matlack 1432a3fe5dbdSDavid Matlack sp->spt = (void *)__get_free_page(gfp); 1433a3fe5dbdSDavid Matlack if (!sp->spt) { 1434a3fe5dbdSDavid Matlack kmem_cache_free(mmu_page_header_cache, sp); 1435a3fe5dbdSDavid Matlack return NULL; 1436a3fe5dbdSDavid Matlack } 1437a3fe5dbdSDavid Matlack 1438a3fe5dbdSDavid Matlack return sp; 1439a3fe5dbdSDavid Matlack } 1440a3fe5dbdSDavid Matlack 1441a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, 1442cb00a70bSDavid Matlack struct tdp_iter *iter, 1443cb00a70bSDavid Matlack bool shared) 1444a3fe5dbdSDavid Matlack { 1445a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1446a3fe5dbdSDavid Matlack 1447a3fe5dbdSDavid Matlack /* 1448a3fe5dbdSDavid Matlack * Since we are allocating while under the MMU lock we have to be 1449a3fe5dbdSDavid Matlack * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct 1450a3fe5dbdSDavid Matlack * reclaim and to avoid making any filesystem callbacks (which can end 1451a3fe5dbdSDavid Matlack * up invoking KVM MMU notifiers, resulting in a deadlock). 1452a3fe5dbdSDavid Matlack * 1453a3fe5dbdSDavid Matlack * If this allocation fails we drop the lock and retry with reclaim 1454a3fe5dbdSDavid Matlack * allowed. 
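	 *
	 * Dropping mmu_lock invalidates the walk, so iter->yielded is set
	 * below to make the iterator restart the walk from the current
	 * position.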
1455a3fe5dbdSDavid Matlack 	 */
1456a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1457a3fe5dbdSDavid Matlack 	if (sp)
1458a3fe5dbdSDavid Matlack 		return sp;
1459a3fe5dbdSDavid Matlack 
1460a3fe5dbdSDavid Matlack 	rcu_read_unlock();
1461cb00a70bSDavid Matlack 
1462cb00a70bSDavid Matlack 	if (shared)
1463a3fe5dbdSDavid Matlack 		read_unlock(&kvm->mmu_lock);
1464cb00a70bSDavid Matlack 	else
1465cb00a70bSDavid Matlack 		write_unlock(&kvm->mmu_lock);
1466a3fe5dbdSDavid Matlack 
1467a3fe5dbdSDavid Matlack 	iter->yielded = true;
1468a3fe5dbdSDavid Matlack 	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1469a3fe5dbdSDavid Matlack 
1470cb00a70bSDavid Matlack 	if (shared)
1471a3fe5dbdSDavid Matlack 		read_lock(&kvm->mmu_lock);
1472cb00a70bSDavid Matlack 	else
1473cb00a70bSDavid Matlack 		write_lock(&kvm->mmu_lock);
1474cb00a70bSDavid Matlack 
1475a3fe5dbdSDavid Matlack 	rcu_read_lock();
1476a3fe5dbdSDavid Matlack 
1477a3fe5dbdSDavid Matlack 	return sp;
1478a3fe5dbdSDavid Matlack }
1479a3fe5dbdSDavid Matlack 
1480cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1481cb00a70bSDavid Matlack 				   struct kvm_mmu_page *sp, bool shared)
1482a3fe5dbdSDavid Matlack {
1483a3fe5dbdSDavid Matlack 	const u64 huge_spte = iter->old_spte;
1484a3fe5dbdSDavid Matlack 	const int level = iter->level;
1485a3fe5dbdSDavid Matlack 	int ret, i;
1486a3fe5dbdSDavid Matlack 
1487a3fe5dbdSDavid Matlack 	tdp_mmu_init_child_sp(sp, iter);
1488a3fe5dbdSDavid Matlack 
1489a3fe5dbdSDavid Matlack 	/*
1490a3fe5dbdSDavid Matlack 	 * No need for atomics when writing to sp->spt since the page table has
1491a3fe5dbdSDavid Matlack 	 * not been linked in yet and thus is not reachable from any other CPU.
1492a3fe5dbdSDavid Matlack 	 */
14932ca3129eSSean Christopherson 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
149447855da0SDavid Matlack 		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1495a3fe5dbdSDavid Matlack 
1496a3fe5dbdSDavid Matlack 	/*
1497a3fe5dbdSDavid Matlack 	 * Replace the huge spte with a pointer to the populated lower level
1498a3fe5dbdSDavid Matlack 	 * page table. Since we are making this change without a TLB flush vCPUs
1499a3fe5dbdSDavid Matlack 	 * will see a mix of the split mappings and the original huge mapping,
1500a3fe5dbdSDavid Matlack 	 * depending on what's currently in their TLB. This is fine from a
1501a3fe5dbdSDavid Matlack 	 * correctness standpoint since the translation will be the same either
1502a3fe5dbdSDavid Matlack 	 * way.
1503a3fe5dbdSDavid Matlack 	 */
150461f94478SSean Christopherson 	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1505a3fe5dbdSDavid Matlack 	if (ret)
1506e0b728b1SDavid Matlack 		goto out;
1507a3fe5dbdSDavid Matlack 
1508a3fe5dbdSDavid Matlack 	/*
1509a3fe5dbdSDavid Matlack 	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1510a3fe5dbdSDavid Matlack 	 * are overwriting from the page stats. But we have to manually update
1511a3fe5dbdSDavid Matlack 	 * the page stats with the new present child pages.
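	 *
	 * E.g. splitting one 2M page yields SPTE_ENT_PER_PAGE (512) new 4K
	 * pages, which the kvm_update_page_stats() call below accounts at
	 * level - 1.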
1512a3fe5dbdSDavid Matlack */ 15132ca3129eSSean Christopherson kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); 1514a3fe5dbdSDavid Matlack 1515e0b728b1SDavid Matlack out: 1516e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); 1517e0b728b1SDavid Matlack return ret; 1518a3fe5dbdSDavid Matlack } 1519a3fe5dbdSDavid Matlack 1520a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, 1521a3fe5dbdSDavid Matlack struct kvm_mmu_page *root, 1522a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1523cb00a70bSDavid Matlack int target_level, bool shared) 1524a3fe5dbdSDavid Matlack { 1525a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp = NULL; 1526a3fe5dbdSDavid Matlack struct tdp_iter iter; 1527a3fe5dbdSDavid Matlack int ret = 0; 1528a3fe5dbdSDavid Matlack 1529a3fe5dbdSDavid Matlack rcu_read_lock(); 1530a3fe5dbdSDavid Matlack 1531a3fe5dbdSDavid Matlack /* 1532a3fe5dbdSDavid Matlack * Traverse the page table splitting all huge pages above the target 1533a3fe5dbdSDavid Matlack * level into one lower level. For example, if we encounter a 1GB page 1534a3fe5dbdSDavid Matlack * we split it into 512 2MB pages. 1535a3fe5dbdSDavid Matlack * 1536a3fe5dbdSDavid Matlack * Since the TDP iterator uses a pre-order traversal, we are guaranteed 1537a3fe5dbdSDavid Matlack * to visit an SPTE before ever visiting its children, which means we 1538a3fe5dbdSDavid Matlack * will correctly recursively split huge pages that are more than one 1539a3fe5dbdSDavid Matlack * level above the target level (e.g. splitting a 1GB to 512 2MB pages, 1540a3fe5dbdSDavid Matlack * and then splitting each of those to 512 4KB pages). 1541a3fe5dbdSDavid Matlack */ 1542a3fe5dbdSDavid Matlack for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { 1543a3fe5dbdSDavid Matlack retry: 1544cb00a70bSDavid Matlack if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) 1545a3fe5dbdSDavid Matlack continue; 1546a3fe5dbdSDavid Matlack 1547a3fe5dbdSDavid Matlack if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) 1548a3fe5dbdSDavid Matlack continue; 1549a3fe5dbdSDavid Matlack 1550a3fe5dbdSDavid Matlack if (!sp) { 1551cb00a70bSDavid Matlack sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); 1552a3fe5dbdSDavid Matlack if (!sp) { 1553a3fe5dbdSDavid Matlack ret = -ENOMEM; 1554e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter.gfn, 1555e0b728b1SDavid Matlack iter.old_spte, 1556e0b728b1SDavid Matlack iter.level, ret); 1557a3fe5dbdSDavid Matlack break; 1558a3fe5dbdSDavid Matlack } 1559a3fe5dbdSDavid Matlack 1560a3fe5dbdSDavid Matlack if (iter.yielded) 1561a3fe5dbdSDavid Matlack continue; 1562a3fe5dbdSDavid Matlack } 1563a3fe5dbdSDavid Matlack 1564cb00a70bSDavid Matlack if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) 1565a3fe5dbdSDavid Matlack goto retry; 1566a3fe5dbdSDavid Matlack 1567a3fe5dbdSDavid Matlack sp = NULL; 1568a3fe5dbdSDavid Matlack } 1569a3fe5dbdSDavid Matlack 1570a3fe5dbdSDavid Matlack rcu_read_unlock(); 1571a3fe5dbdSDavid Matlack 1572a3fe5dbdSDavid Matlack /* 1573a3fe5dbdSDavid Matlack * It's possible to exit the loop having never used the last sp if, for 1574a3fe5dbdSDavid Matlack * example, a vCPU doing HugePage NX splitting wins the race and 1575a3fe5dbdSDavid Matlack * installs its own sp in place of the last sp we tried to split. 
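 *
 * (On a failed installation the retry path above keeps the
 * already-allocated sp and tries again; sp is only reset to NULL once it
 * has actually been linked into the paging structure.)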
1576a3fe5dbdSDavid Matlack */ 1577a3fe5dbdSDavid Matlack if (sp) 1578a3fe5dbdSDavid Matlack tdp_mmu_free_sp(sp); 1579a3fe5dbdSDavid Matlack 1580a3fe5dbdSDavid Matlack return ret; 1581a3fe5dbdSDavid Matlack } 1582a3fe5dbdSDavid Matlack 1583cb00a70bSDavid Matlack 1584a3fe5dbdSDavid Matlack /* 1585a3fe5dbdSDavid Matlack * Try to split all huge pages mapped by the TDP MMU down to the target level. 1586a3fe5dbdSDavid Matlack */ 1587a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, 1588a3fe5dbdSDavid Matlack const struct kvm_memory_slot *slot, 1589a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1590cb00a70bSDavid Matlack int target_level, bool shared) 1591a3fe5dbdSDavid Matlack { 1592a3fe5dbdSDavid Matlack struct kvm_mmu_page *root; 1593a3fe5dbdSDavid Matlack int r = 0; 1594a3fe5dbdSDavid Matlack 1595cb00a70bSDavid Matlack kvm_lockdep_assert_mmu_lock_held(kvm, shared); 1596a3fe5dbdSDavid Matlack 15977c554d8eSPaolo Bonzini for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { 1598cb00a70bSDavid Matlack r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); 1599a3fe5dbdSDavid Matlack if (r) { 1600cb00a70bSDavid Matlack kvm_tdp_mmu_put_root(kvm, root, shared); 1601a3fe5dbdSDavid Matlack break; 1602a3fe5dbdSDavid Matlack } 1603a3fe5dbdSDavid Matlack } 1604a3fe5dbdSDavid Matlack } 1605a3fe5dbdSDavid Matlack 1606a6a0b05dSBen Gardon /* 1607a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1608a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1609a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1610a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1611a6a0b05dSBen Gardon * be flushed. 
1612a6a0b05dSBen Gardon */ 1613a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1614a6a0b05dSBen Gardon gfn_t start, gfn_t end) 1615a6a0b05dSBen Gardon { 1616a6a0b05dSBen Gardon struct tdp_iter iter; 1617a6a0b05dSBen Gardon u64 new_spte; 1618a6a0b05dSBen Gardon bool spte_set = false; 1619a6a0b05dSBen Gardon 16207cca2d0bSBen Gardon rcu_read_lock(); 16217cca2d0bSBen Gardon 1622a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, start, end) { 162324ae4cfaSBen Gardon retry: 162424ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 16251af4a960SBen Gardon continue; 16261af4a960SBen Gardon 16273354ef5aSSean Christopherson if (!is_shadow_present_pte(iter.old_spte)) 16283354ef5aSSean Christopherson continue; 16293354ef5aSSean Christopherson 1630a6a0b05dSBen Gardon if (spte_ad_need_write_protect(iter.old_spte)) { 1631a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1632a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1633a6a0b05dSBen Gardon else 1634a6a0b05dSBen Gardon continue; 1635a6a0b05dSBen Gardon } else { 1636a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1637a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1638a6a0b05dSBen Gardon else 1639a6a0b05dSBen Gardon continue; 1640a6a0b05dSBen Gardon } 1641a6a0b05dSBen Gardon 16423e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 164324ae4cfaSBen Gardon goto retry; 16443255530aSDavid Matlack 1645a6a0b05dSBen Gardon spte_set = true; 1646a6a0b05dSBen Gardon } 16477cca2d0bSBen Gardon 16487cca2d0bSBen Gardon rcu_read_unlock(); 1649a6a0b05dSBen Gardon return spte_set; 1650a6a0b05dSBen Gardon } 1651a6a0b05dSBen Gardon 1652a6a0b05dSBen Gardon /* 1653a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1654a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1655a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1656a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1657a6a0b05dSBen Gardon * be flushed. 1658a6a0b05dSBen Gardon */ 1659269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, 1660269e9552SHamza Mahfooz const struct kvm_memory_slot *slot) 1661a6a0b05dSBen Gardon { 1662a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1663a6a0b05dSBen Gardon bool spte_set = false; 1664a6a0b05dSBen Gardon 166524ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1666a6a0b05dSBen Gardon 1667d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1668a6a0b05dSBen Gardon spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, 1669a6a0b05dSBen Gardon slot->base_gfn + slot->npages); 1670a6a0b05dSBen Gardon 1671a6a0b05dSBen Gardon return spte_set; 1672a6a0b05dSBen Gardon } 1673a6a0b05dSBen Gardon 1674a6a0b05dSBen Gardon /* 1675a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1676a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1677a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1678a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1679a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
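 *
 * Example: with gfn = 0x1000 and mask = 0x5, only the 4K SPTEs mapping
 * GFNs 0x1000 and 0x1002 are candidates for clearing.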
1680a6a0b05dSBen Gardon */ 1681a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, 1682a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, bool wrprot) 1683a6a0b05dSBen Gardon { 1684a6a0b05dSBen Gardon struct tdp_iter iter; 1685a6a0b05dSBen Gardon u64 new_spte; 1686a6a0b05dSBen Gardon 16877cca2d0bSBen Gardon rcu_read_lock(); 16887cca2d0bSBen Gardon 1689a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), 1690a6a0b05dSBen Gardon gfn + BITS_PER_LONG) { 1691a6a0b05dSBen Gardon if (!mask) 1692a6a0b05dSBen Gardon break; 1693a6a0b05dSBen Gardon 1694a6a0b05dSBen Gardon if (iter.level > PG_LEVEL_4K || 1695a6a0b05dSBen Gardon !(mask & (1UL << (iter.gfn - gfn)))) 1696a6a0b05dSBen Gardon continue; 1697a6a0b05dSBen Gardon 1698f1b3b06aSBen Gardon mask &= ~(1UL << (iter.gfn - gfn)); 1699f1b3b06aSBen Gardon 1700a6a0b05dSBen Gardon if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { 1701a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1702a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1703a6a0b05dSBen Gardon else 1704a6a0b05dSBen Gardon continue; 1705a6a0b05dSBen Gardon } else { 1706a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1707a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1708a6a0b05dSBen Gardon else 1709a6a0b05dSBen Gardon continue; 1710a6a0b05dSBen Gardon } 1711a6a0b05dSBen Gardon 1712a6a0b05dSBen Gardon tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); 1713a6a0b05dSBen Gardon } 17147cca2d0bSBen Gardon 17157cca2d0bSBen Gardon rcu_read_unlock(); 1716a6a0b05dSBen Gardon } 1717a6a0b05dSBen Gardon 1718a6a0b05dSBen Gardon /* 1719a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1720a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1721a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1722a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1723a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
1724a6a0b05dSBen Gardon */ 1725a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, 1726a6a0b05dSBen Gardon struct kvm_memory_slot *slot, 1727a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, 1728a6a0b05dSBen Gardon bool wrprot) 1729a6a0b05dSBen Gardon { 1730a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1731a6a0b05dSBen Gardon 1732531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1733a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 1734a6a0b05dSBen Gardon clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); 1735a6a0b05dSBen Gardon } 1736a6a0b05dSBen Gardon 17374b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm, 173814881998SBen Gardon struct kvm_mmu_page *root, 17394b85c921SSean Christopherson const struct kvm_memory_slot *slot) 174014881998SBen Gardon { 17419eba50f8SSean Christopherson gfn_t start = slot->base_gfn; 17429eba50f8SSean Christopherson gfn_t end = start + slot->npages; 174314881998SBen Gardon struct tdp_iter iter; 17445ba7c4c6SBen Gardon int max_mapping_level; 174514881998SBen Gardon 17467cca2d0bSBen Gardon rcu_read_lock(); 17477cca2d0bSBen Gardon 174885f44f8cSSean Christopherson for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) { 174985f44f8cSSean Christopherson retry: 17504b85c921SSean Christopherson if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 17511af4a960SBen Gardon continue; 17521af4a960SBen Gardon 175385f44f8cSSean Christopherson if (iter.level > KVM_MAX_HUGEPAGE_LEVEL || 175485f44f8cSSean Christopherson !is_shadow_present_pte(iter.old_spte)) 175585f44f8cSSean Christopherson continue; 175685f44f8cSSean Christopherson 175785f44f8cSSean Christopherson /* 175885f44f8cSSean Christopherson * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with 175985f44f8cSSean Christopherson * a large page size, then its parent would have been zapped 176085f44f8cSSean Christopherson * instead of stepping down. 176185f44f8cSSean Christopherson */ 176285f44f8cSSean Christopherson if (is_last_spte(iter.old_spte, iter.level)) 176385f44f8cSSean Christopherson continue; 176485f44f8cSSean Christopherson 176585f44f8cSSean Christopherson /* 176685f44f8cSSean Christopherson * If iter.gfn resides outside of the slot, i.e. the page for 176785f44f8cSSean Christopherson * the current level overlaps but is not contained by the slot, 176885f44f8cSSean Christopherson * then the SPTE can't be made huge. More importantly, trying 176985f44f8cSSean Christopherson * to query that info from slot->arch.lpage_info will cause an 177085f44f8cSSean Christopherson * out-of-bounds access. 177185f44f8cSSean Christopherson */ 177285f44f8cSSean Christopherson if (iter.gfn < start || iter.gfn >= end) 177314881998SBen Gardon continue; 177414881998SBen Gardon 17755ba7c4c6SBen Gardon max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, 1776a8ac499bSSean Christopherson iter.gfn, PG_LEVEL_NUM); 177785f44f8cSSean Christopherson if (max_mapping_level < iter.level) 17785ba7c4c6SBen Gardon continue; 17795ba7c4c6SBen Gardon 17804b85c921SSean Christopherson /* Note, a successful atomic zap also does a remote TLB flush. 
*/ 178185f44f8cSSean Christopherson if (tdp_mmu_zap_spte_atomic(kvm, &iter)) 178285f44f8cSSean Christopherson goto retry; 17832db6f772SBen Gardon } 178414881998SBen Gardon 17857cca2d0bSBen Gardon rcu_read_unlock(); 178614881998SBen Gardon } 178714881998SBen Gardon 178814881998SBen Gardon /* 178985f44f8cSSean Christopherson * Zap non-leaf SPTEs (and free their associated page tables) which could 179085f44f8cSSean Christopherson * be replaced by huge pages, for GFNs within the slot. 179114881998SBen Gardon */ 17924b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, 17934b85c921SSean Christopherson const struct kvm_memory_slot *slot) 179414881998SBen Gardon { 179514881998SBen Gardon struct kvm_mmu_page *root; 179614881998SBen Gardon 17972db6f772SBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 179814881998SBen Gardon 1799d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 18004b85c921SSean Christopherson zap_collapsible_spte_range(kvm, root, slot); 180114881998SBen Gardon } 180246044f72SBen Gardon 180346044f72SBen Gardon /* 180446044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 18055fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 180646044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 180746044f72SBen Gardon */ 180846044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, 18093ad93562SKeqian Zhu gfn_t gfn, int min_level) 181046044f72SBen Gardon { 181146044f72SBen Gardon struct tdp_iter iter; 181246044f72SBen Gardon u64 new_spte; 181346044f72SBen Gardon bool spte_set = false; 181446044f72SBen Gardon 18153ad93562SKeqian Zhu BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 18163ad93562SKeqian Zhu 18177cca2d0bSBen Gardon rcu_read_lock(); 18187cca2d0bSBen Gardon 181977aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { 18203ad93562SKeqian Zhu if (!is_shadow_present_pte(iter.old_spte) || 18213ad93562SKeqian Zhu !is_last_spte(iter.old_spte, iter.level)) 18223ad93562SKeqian Zhu continue; 18233ad93562SKeqian Zhu 182446044f72SBen Gardon new_spte = iter.old_spte & 18255fc3424fSSean Christopherson ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); 182646044f72SBen Gardon 18277c8a4742SDavid Matlack if (new_spte == iter.old_spte) 18287c8a4742SDavid Matlack break; 18297c8a4742SDavid Matlack 183046044f72SBen Gardon tdp_mmu_set_spte(kvm, &iter, new_spte); 183146044f72SBen Gardon spte_set = true; 183246044f72SBen Gardon } 183346044f72SBen Gardon 18347cca2d0bSBen Gardon rcu_read_unlock(); 18357cca2d0bSBen Gardon 183646044f72SBen Gardon return spte_set; 183746044f72SBen Gardon } 183846044f72SBen Gardon 183946044f72SBen Gardon /* 184046044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 18415fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 184246044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 
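 *
 * Illustrative call (sketch), write-protecting a single 4K mapping and
 * flushing if anything changed:
 *
 *	if (kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs(kvm);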
184346044f72SBen Gardon  */
184446044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
18453ad93562SKeqian Zhu 				   struct kvm_memory_slot *slot, gfn_t gfn,
18463ad93562SKeqian Zhu 				   int min_level)
184746044f72SBen Gardon {
184846044f72SBen Gardon 	struct kvm_mmu_page *root;
184946044f72SBen Gardon 	bool spte_set = false;
185046044f72SBen Gardon 
1851531810caSBen Gardon 	lockdep_assert_held_write(&kvm->mmu_lock);
1852a3f15bdaSSean Christopherson 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
18533ad93562SKeqian Zhu 		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1854a3f15bdaSSean Christopherson 
185546044f72SBen Gardon 	return spte_set;
185646044f72SBen Gardon }
185746044f72SBen Gardon 
185895fb5b02SBen Gardon /*
185995fb5b02SBen Gardon  * Return the level of the lowest level SPTE added to sptes.
186095fb5b02SBen Gardon  * That SPTE may be non-present.
1861c5c8c7c5SDavid Matlack  *
1862c5c8c7c5SDavid Matlack  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
186395fb5b02SBen Gardon  */
186439b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
186539b4d43eSSean Christopherson 			 int *root_level)
186695fb5b02SBen Gardon {
186795fb5b02SBen Gardon 	struct tdp_iter iter;
186895fb5b02SBen Gardon 	struct kvm_mmu *mmu = vcpu->arch.mmu;
186995fb5b02SBen Gardon 	gfn_t gfn = addr >> PAGE_SHIFT;
18702aa07893SSean Christopherson 	int leaf = -1;
187195fb5b02SBen Gardon 
1872a972e29cSPaolo Bonzini 	*root_level = vcpu->arch.mmu->root_role.level;
187395fb5b02SBen Gardon 
187495fb5b02SBen Gardon 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
187595fb5b02SBen Gardon 		leaf = iter.level;
1876dde81f94SSean Christopherson 		sptes[leaf] = iter.old_spte;
187795fb5b02SBen Gardon 	}
187895fb5b02SBen Gardon 
187995fb5b02SBen Gardon 	return leaf;
188095fb5b02SBen Gardon }
18816e8eb206SDavid Matlack 
18826e8eb206SDavid Matlack /*
18836e8eb206SDavid Matlack  * Returns the last level spte pointer of the shadow page walk for the given
18846e8eb206SDavid Matlack  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
18856e8eb206SDavid Matlack  * walk could be performed, returns NULL and *spte does not contain valid data.
18866e8eb206SDavid Matlack  *
18876e8eb206SDavid Matlack  * Contract:
18886e8eb206SDavid Matlack  * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
18896e8eb206SDavid Matlack  * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
18906e8eb206SDavid Matlack  *
18916e8eb206SDavid Matlack  * WARNING: This function is only intended to be called during fast_page_fault.
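 *
 * Expected calling pattern (sketch):
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, addr, &spte);
 *	... use sptep and spte ...
 *	kvm_tdp_mmu_walk_lockless_end();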
18926e8eb206SDavid Matlack  */
18936e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
18946e8eb206SDavid Matlack 					u64 *spte)
18956e8eb206SDavid Matlack {
18966e8eb206SDavid Matlack 	struct tdp_iter iter;
18976e8eb206SDavid Matlack 	struct kvm_mmu *mmu = vcpu->arch.mmu;
18986e8eb206SDavid Matlack 	gfn_t gfn = addr >> PAGE_SHIFT;
18996e8eb206SDavid Matlack 	tdp_ptep_t sptep = NULL;
19006e8eb206SDavid Matlack 
19016e8eb206SDavid Matlack 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
19026e8eb206SDavid Matlack 		*spte = iter.old_spte;
19036e8eb206SDavid Matlack 		sptep = iter.sptep;
19046e8eb206SDavid Matlack 	}
19056e8eb206SDavid Matlack 
19066e8eb206SDavid Matlack 	/*
19076e8eb206SDavid Matlack 	 * Perform the rcu_dereference to get the raw spte pointer value since
19086e8eb206SDavid Matlack 	 * we are passing it up to fast_page_fault, which is shared with the
19096e8eb206SDavid Matlack 	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
19106e8eb206SDavid Matlack 	 * annotation.
19116e8eb206SDavid Matlack 	 *
19126e8eb206SDavid Matlack 	 * This is safe since fast_page_fault obeys the contracts of this
19136e8eb206SDavid Matlack 	 * function as well as all TDP MMU contracts around modifying SPTEs
19146e8eb206SDavid Matlack 	 * outside of mmu_lock.
19156e8eb206SDavid Matlack 	 */
19166e8eb206SDavid Matlack 	return rcu_dereference(sptep);
19176e8eb206SDavid Matlack }
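
/*
 * Usage sketch for kvm_tdp_mmu_get_walk() above (illustrative only; the
 * array sizing follows its get_mmio_spte() caller in mmu.c):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * On return, leaf < 0 means no SPTE was walked; otherwise
 * sptes[leaf..root_level] hold the SPTEs visited on the walk.
 */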