// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);

/* Initializes the TDP MMU for the VM, if enabled. */
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	struct workqueue_struct *wq;

	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return 0;

	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
	if (!wq)
		return -ENOMEM;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
	kvm->arch.tdp_mmu_zap_wq = wq;
	return 1;
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	/* Also waits for any queued work items. */
	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
	 * can call kvm_tdp_mmu_put_root and create new callbacks.
	 */
	rcu_barrier();
}
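/*
 * Illustrative sketch, not part of the original file: the tri-state return
 * of kvm_mmu_init_tdp_mmu() is "<0 on error, 0 if the TDP MMU is disabled,
 * 1 if it is enabled".  A hypothetical caller (the real call site lives in
 * mmu.c's VM-init path) would look roughly like:
 *
 *	r = kvm_mmu_init_tdp_mmu(kvm);
 *	if (r < 0)
 *		return r;	// -ENOMEM: the zap workqueue allocation failed
 *	tdp_mmu_active = r;	// hypothetical local, 0 or 1
 */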
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared);

static void tdp_mmu_zap_root_work(struct work_struct *work)
{
	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
						 tdp_mmu_async_work);
	struct kvm *kvm = root->tdp_mmu_async_data;

	read_lock(&kvm->mmu_lock);

	/*
	 * A TLB flush is not necessary as KVM performs a local TLB flush when
	 * allocating a new root (see kvm_mmu_load()), and when migrating a
	 * vCPU to a different pCPU.  Note, the local TLB flush on reuse also
	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
	 * intermediate paging structures, that may be zapped, as such entries
	 * are associated with the ASID on both VMX and SVM.
	 */
	tdp_mmu_zap_root(kvm, root, true);

	/*
	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
	 * avoiding an infinite loop.  By design, the root is reachable while
	 * it's being asynchronously zapped, thus a different task can put its
	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
	 * asynchronously zapped root is unavoidable.
	 */
	kvm_tdp_mmu_put_root(kvm, root, true);

	read_unlock(&kvm->mmu_lock);
}

static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	root->tdp_mmu_async_data = kvm;
	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}

static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
{
	union kvm_mmu_page_role role = page->role;
	role.invalid = true;

	/* No need to use cmpxchg, only the invalid bit can change. */
	role.word = xchg(&page->role.word, role.word);
	return role.invalid;
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	WARN_ON(!root->tdp_mmu_page);

	/*
	 * The root now has refcount=0.  It is valid, but readers already
	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
	 * rejects it.  This remains true for the rest of the execution
	 * of this function, because readers visit valid roots only
	 * (except for tdp_mmu_zap_root_work(), which however
	 * does not acquire any reference itself).
	 *
	 * Even though there are flows that need to visit all roots for
	 * correctness, they all take mmu_lock for write, so they cannot yet
	 * run concurrently.  The same is true after kvm_tdp_root_mark_invalid,
	 * since the root still has refcount=0.
	 *
	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
	 * So the root temporarily gets an extra reference, going to refcount=1
	 * while staying invalid.  Readers still cannot acquire any reference;
	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
	 * they might take an extra reference if they themselves yield.
	 * Therefore, when the reference is given back by the worker,
	 * there is no guarantee that the refcount is still 1.  If not, whoever
	 * puts the last reference will free the page, but they will not have to
	 * zap the root because a root cannot go from invalid to valid.
	 */
	if (!kvm_tdp_root_mark_invalid(root)) {
		refcount_set(&root->tdp_mmu_root_count, 1);

		/*
		 * Zapping the root in a worker is not just "nice to have";
		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
		 * might return with some roots not zapped yet.
		 */
		tdp_mmu_schedule_zap_root(kvm, root);
		return;
	}

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
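/*
 * Illustrative timeline, not part of the original file, of the refcount
 * dance described in the comment above when a valid root is put:
 *
 *	refcount 1 -> 0    refcount_dec_and_test() succeeds
 *	mark invalid       kvm_tdp_root_mark_invalid() returns false (was valid)
 *	refcount 0 -> 1    temporary reference gifted to the zap worker
 *	worker runs        tdp_mmu_zap_root_work() zaps, then puts the root
 *	refcount 1 -> 0    the root is now invalid, so it is freed via
 *	                   list_del_rcu() + call_rcu()
 */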
/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL).  A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * If @only_valid is true, invalid roots are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      bool shared, bool only_valid)
{
	struct kvm_mmu_page *next_root;

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if ((!only_valid || !next_root->role.invalid) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root, shared);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode. In the unlikely event that this thread must free a root, the lock
 * will be temporarily dropped and reacquired in write mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
	     _root;								\
	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
		    kvm_mmu_page_as_id(_root) != _as_id) {			\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
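/*
 * Illustrative usage, not part of the original file: a typical walk over
 * every valid root of an address space under mmu_lock held for read, as the
 * zapping and dirty-logging flows later in this file do:
 *
 *	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id, true)
 *		spte_set |= some_helper(kvm, root, start, end);
 *
 * "some_helper" is a stand-in.  Per the note above, breaking out of such a
 * loop early requires an explicit kvm_tdp_mmu_put_root() on the current root.
 */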
/*
 * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
		    kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}
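/*
 * Illustrative note, not part of the original file: the set_page_private()
 * call in tdp_mmu_init_sp() is what lets helpers such as sptep_to_sp()
 * recover the shadow page from a raw SPTE pointer, roughly (a sketch; the
 * real helper goes through the physical address):
 *
 *	struct page *page = virt_to_page(sptep);
 *	struct kvm_mmu_page *sp = (struct kvm_mmu_page *)page_private(page);
 */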
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * Check for an existing root before allocating a new one.  Note, the
	 * role check prevents consuming an invalid root.
	 */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	refcount_set(&root->tdp_mmu_root_count, 1);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}
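/*
 * Illustrative example, not part of the original file: the dirty-log hook
 * above fires only on write-enable transitions of 4KiB SPTEs.  Given
 *
 *	old_spte: present, read-only
 *	new_spte: present, writable, same PFN
 *
 * the GFN is marked dirty in its memslot via mark_page_dirty_in_slot(),
 * whereas a writable->writable change to the same PFN is ignored.
 */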
/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed. Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep = rcu_dereference(pt) + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_child_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * keep setting the SPTE until the transition is
			 * from some other value to the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level,
				    shared);
	}

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
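/*
 * Illustrative timeline, not part of the original file, for the shared-mode
 * xchg() loop in handle_removed_pt() above; REMOVED_SPTE effectively acts
 * as a per-SPTE lock:
 *
 *	CPU 0 (removing this table)        CPU 1 (owns the frozen SPTE)
 *	old = xchg(sptep, REMOVED_SPTE)    // reads back REMOVED_SPTE
 *	is_removed_spte(old), so spin      writes its final SPTE value
 *	old = xchg(sptep, REMOVED_SPTE)    // reads back the final value
 *	break, and zap 'old'
 */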
/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.  Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *            no side-effects other than setting iter->old_spte to the last
 *            known value of the spte.
 */
static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter,
					  u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);
	u64 old_spte;

	/*
	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	old_spte = cmpxchg64(sptep, iter->old_spte, new_spte);
	if (old_spte != iter->old_spte) {
		/*
		 * The page table entry was modified by a different logical
		 * CPU. Refresh iter->old_spte with the current value so the
		 * caller operates on fresh data, e.g. if it retries
		 * tdp_mmu_set_spte_atomic().
		 */
		iter->old_spte = old_spte;
		return -EBUSY;
	}

	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return 0;
}

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter)
{
	int ret;

	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
	if (ret)
		return ret;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	kvm_tdp_mmu_write_spte(iter->sptep, 0);

	return 0;
}
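/*
 * Illustrative usage, not part of the original file: callers running under
 * mmu_lock in read mode are expected to retry on -EBUSY, since
 * tdp_mmu_set_spte_atomic() has already refreshed iter->old_spte for them:
 *
 *	if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *		goto retry;	// iter.old_spte now holds the fresh value
 *
 * This is exactly the pattern used by __tdp_mmu_zap_root() below.
 */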
/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:	      KVM instance
 * @as_id:	      Address space ID, i.e. regular vs. SMM
 * @sptep:	      Pointer to the SPTE
 * @old_spte:	      The current value of the SPTE
 * @new_spte:	      The new value that will be set for the SPTE
 * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
 * @level:	      The level _containing_ the SPTE (its parent PT's level)
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			       u64 old_spte, u64 new_spte, gfn_t gfn, int level,
			       bool record_acc_track, bool record_dirty_log)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));

	kvm_tdp_mmu_write_spte(sptep, new_spte);

	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

	if (record_acc_track)
		handle_changed_spte_acc_track(old_spte, new_spte, level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
					      new_spte, level);
}

static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				     u64 new_spte, bool record_acc_track,
				     bool record_dirty_log)
{
	WARN_ON_ONCE(iter->yielded);

	__tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
			   new_spte, iter->gfn, iter->level,
			   record_acc_track, record_dirty_log);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	_tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
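/*
 * Illustrative usage, not part of the original file: the leaf-only iterator
 * hides the present/last-level filtering, so a typical walk reduces to:
 *
 *	tdp_root_for_each_leaf_pte(iter, root, start, end)
 *		do_something(iter.old_spte);	// "do_something" is a stand-in
 *
 * tdp_mmu_for_each_pte() additionally resolves the root from a kvm_mmu and
 * is what the page fault path at the end of this file uses.
 */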
/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	WARN_ON(iter->yielded);

	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		rcu_read_unlock();

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		iter->yielded = true;
	}

	return iter->yielded;
}
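/*
 * Illustrative usage, not part of the original file, of the yield contract:
 * yield-friendly walks in this file start each loop iteration with
 *
 *	if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
 *		flush = false;	// the flush was performed before yielding
 *		continue;	// iter->yielded makes tdp_iter_next() restart
 *	}
 *
 * and skip all SPTE processing for that iteration, as required above.
 */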
static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
	 * a gpa range that would exceed the max gfn, and KVM does not create
	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
	 * the slow emulation path every time.
	 */
	return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			       bool shared, int zap_level)
{
	struct tdp_iter iter;

	gfn_t end = tdp_mmu_max_gfn_exclusive();
	gfn_t start = 0;

	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_set_spte(kvm, &iter, 0);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
			goto retry;
	}
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{
	/*
	 * The root must have an elevated refcount so that it's reachable via
	 * mmu_notifier callbacks, which allows this path to yield and drop
	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
	 * must drop all references to relevant pages prior to completing the
	 * callback.  Dropping mmu_lock with an unreachable root would result
	 * in zapping SPTEs after a relevant mmu_notifier callback completes
	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
	 * dirty accessed bits to the SPTE's associated struct page.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
	 * split the zap into two passes.  On the first pass, zap at the 1gb
	 * level, and then zap top-level SPs on the second pass.  "1gb" is not
	 * arbitrary, as KVM must be able to zap a 1gb shadow page without
	 * inducing a stall to allow in-place replacement with a 1gb hugepage.
	 *
	 * Because zapping a SP recurses on its children, stepping down to
	 * PG_LEVEL_4K in the iterator itself is unnecessary.
	 */
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * This helper intentionally doesn't allow zapping a root shadow page,
	 * which doesn't have a parent page table and thus no associated entry.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
		return false;

	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
			   sp->gfn, sp->role.level + 1, true, true);

	return true;
}
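/*
 * Illustrative note, not part of the original file: kvm_tdp_mmu_zap_sp()
 * passes sp->role.level + 1 because __tdp_mmu_set_spte() takes the level of
 * the page table *containing* the SPTE, and sp->ptep points at the entry in
 * the parent table:
 *
 *	parent PT (level N + 1) --sp->ptep--> sp->spt (level N)
 */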
/*
 * Zap leaf SPTEs for the range of gfns, [start, end).  Returns true if SPTEs
 * have been cleared and a TLB flush is needed before releasing the MMU lock.
 *
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	end = min(end, tdp_mmu_max_gfn_exclusive());

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush = true;
	}

	rcu_read_unlock();

	/*
	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
	 */
	return flush;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
			   bool can_yield, bool flush)
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *root;
	int i;

	/*
	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
	 * before returning to the caller.  Zap directly even if the root is
	 * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
	 * all that expensive and mmu_lock is already held, which means the
	 * worker has yielded, i.e. flushing the work instead of zapping here
	 * isn't guaranteed to be any faster.
	 *
	 * A TLB flush is unnecessary, KVM zaps everything if and only if the
	 * VM is being destroyed or the userspace VMM has exited.  In both
	 * cases, KVM_RUN is unreachable, i.e. no vCPUs will ever service the
	 * request.
	 */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
			tdp_mmu_zap_root(kvm, root, false);
	}
}

/*
 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
 * zap" completes.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
}

/*
 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
 * is about to be zapped, e.g. in response to a memslots update.  The actual
 * zapping is performed asynchronously, so a reference is taken on all roots.
 * Using a separate workqueue makes it easy to ensure that the destruction is
 * performed before the "fast zap" completes, without keeping a separate list
 * of invalidated roots; the list is effectively the list of work items in
 * the workqueue.
 *
 * Get a reference even if the root is already invalid, the asynchronous worker
 * assumes it was gifted a reference to the root it processes.  Because mmu_lock
 * is held for write, it should be impossible to observe a root with zero
 * refcount, i.e. the list of roots cannot be stale.
 *
 * This has essentially the same effect for the TDP MMU
 * as updating mmu_valid_gen does for the shadow MMU.
 */
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (!root->role.invalid &&
		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
			root->role.invalid = true;
			tdp_mmu_schedule_zap_root(kvm, root);
		}
	}
}
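/*
 * Illustrative sketch, not part of the original file, of the "fast zap"
 * sequence that the two helpers above implement together; the actual caller
 * (kvm_mmu_zap_all_fast() in mmu.c) does roughly:
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_invalidate_all_roots(kvm);	// mark roots + queue work
 *	write_unlock(&kvm->mmu_lock);
 *	...
 *	kvm_tdp_mmu_zap_invalidated_roots(kvm);	// flush_workqueue()
 */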
1039bb18842eSBen Gardon * (NPT/EPT violation/misconfiguration) 1040bb18842eSBen Gardon */ 1041cdc47767SPaolo Bonzini static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, 1042cdc47767SPaolo Bonzini struct kvm_page_fault *fault, 1043cdc47767SPaolo Bonzini struct tdp_iter *iter) 1044bb18842eSBen Gardon { 1045c435d4b7SSean Christopherson struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); 1046bb18842eSBen Gardon u64 new_spte; 104757a3e96dSKai Huang int ret = RET_PF_FIXED; 1048ad67e480SPaolo Bonzini bool wrprot = false; 1049bb18842eSBen Gardon 10507158bee4SPaolo Bonzini WARN_ON(sp->role.level != fault->goal_level); 1051e710c5f6SDavid Matlack if (unlikely(!fault->slot)) 1052bb18842eSBen Gardon new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); 10539a77daacSBen Gardon else 105453597858SDavid Matlack wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, 10552839180cSPaolo Bonzini fault->pfn, iter->old_spte, fault->prefetch, true, 10567158bee4SPaolo Bonzini fault->map_writable, &new_spte); 1057bb18842eSBen Gardon 1058bb18842eSBen Gardon if (new_spte == iter->old_spte) 1059bb18842eSBen Gardon ret = RET_PF_SPURIOUS; 10603e72c791SDavid Matlack else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) 10619a77daacSBen Gardon return RET_PF_RETRY; 1062bb95dfb9SSean Christopherson else if (is_shadow_present_pte(iter->old_spte) && 1063bb95dfb9SSean Christopherson !is_last_spte(iter->old_spte, iter->level)) 1064bb95dfb9SSean Christopherson kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, 1065bb95dfb9SSean Christopherson KVM_PAGES_PER_HPAGE(iter->level + 1)); 1066bb18842eSBen Gardon 1067bb18842eSBen Gardon /* 1068bb18842eSBen Gardon * If the page fault was caused by a write but the page is write 1069bb18842eSBen Gardon * protected, emulation is needed. If the emulation was skipped, 1070bb18842eSBen Gardon * the vCPU would have the same fault again. 1071bb18842eSBen Gardon */ 1072ad67e480SPaolo Bonzini if (wrprot) { 1073cdc47767SPaolo Bonzini if (fault->write) 1074bb18842eSBen Gardon ret = RET_PF_EMULATE; 1075bb18842eSBen Gardon } 1076bb18842eSBen Gardon 1077bb18842eSBen Gardon /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ 10789a77daacSBen Gardon if (unlikely(is_mmio_spte(new_spte))) { 10799a77daacSBen Gardon trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, 10809a77daacSBen Gardon new_spte); 1081bb18842eSBen Gardon ret = RET_PF_EMULATE; 10823849e092SSean Christopherson } else { 10839a77daacSBen Gardon trace_kvm_mmu_set_spte(iter->level, iter->gfn, 10849a77daacSBen Gardon rcu_dereference(iter->sptep)); 10853849e092SSean Christopherson } 1086bb18842eSBen Gardon 1087857f8474SKai Huang /* 1088857f8474SKai Huang * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be 1089857f8474SKai Huang * consistent with legacy MMU behavior. 1090857f8474SKai Huang */ 1091857f8474SKai Huang if (ret != RET_PF_SPURIOUS) 1092bb18842eSBen Gardon vcpu->stat.pf_fixed++; 1093bb18842eSBen Gardon 1094bb18842eSBen Gardon return ret; 1095bb18842eSBen Gardon } 1096bb18842eSBen Gardon 1097bb18842eSBen Gardon /* 1098cb00a70bSDavid Matlack * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the 1099cb00a70bSDavid Matlack * provided page table. 11007b7e1ab6SDavid Matlack * 11017b7e1ab6SDavid Matlack * @kvm: kvm instance 11027b7e1ab6SDavid Matlack * @iter: a tdp_iter instance currently on the SPTE that should be set 11037b7e1ab6SDavid Matlack * @sp: The new TDP page table to install. 
11047b7e1ab6SDavid Matlack * @account_nx: True if this page table is being installed to split a 11057b7e1ab6SDavid Matlack * non-executable huge page. 1106cb00a70bSDavid Matlack * @shared: This operation is running under the MMU lock in read mode. 11077b7e1ab6SDavid Matlack * 11087b7e1ab6SDavid Matlack * Returns: 0 if the new page table was installed. Non-0 if the page table 11097b7e1ab6SDavid Matlack * could not be installed (e.g. the atomic compare-exchange failed). 11107b7e1ab6SDavid Matlack */ 1111cb00a70bSDavid Matlack static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, 1112cb00a70bSDavid Matlack struct kvm_mmu_page *sp, bool account_nx, 1113cb00a70bSDavid Matlack bool shared) 11147b7e1ab6SDavid Matlack { 11157b7e1ab6SDavid Matlack u64 spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask); 1116cb00a70bSDavid Matlack int ret = 0; 11177b7e1ab6SDavid Matlack 1118cb00a70bSDavid Matlack if (shared) { 11197b7e1ab6SDavid Matlack ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); 11207b7e1ab6SDavid Matlack if (ret) 11217b7e1ab6SDavid Matlack return ret; 1122cb00a70bSDavid Matlack } else { 1123cb00a70bSDavid Matlack tdp_mmu_set_spte(kvm, iter, spte); 1124cb00a70bSDavid Matlack } 11257b7e1ab6SDavid Matlack 11267b7e1ab6SDavid Matlack spin_lock(&kvm->arch.tdp_mmu_pages_lock); 11277b7e1ab6SDavid Matlack list_add(&sp->link, &kvm->arch.tdp_mmu_pages); 11287b7e1ab6SDavid Matlack if (account_nx) 11297b7e1ab6SDavid Matlack account_huge_nx_page(kvm, sp); 11307b7e1ab6SDavid Matlack spin_unlock(&kvm->arch.tdp_mmu_pages_lock); 11317b7e1ab6SDavid Matlack 11327b7e1ab6SDavid Matlack return 0; 11337b7e1ab6SDavid Matlack } 11347b7e1ab6SDavid Matlack 11357b7e1ab6SDavid Matlack /* 1136bb18842eSBen Gardon * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing 1137bb18842eSBen Gardon * page tables and SPTEs to translate the faulting guest physical address. 1138bb18842eSBen Gardon */ 11392f6305ddSPaolo Bonzini int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) 1140bb18842eSBen Gardon { 1141bb18842eSBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu; 1142bb18842eSBen Gardon struct tdp_iter iter; 114389c0fd49SBen Gardon struct kvm_mmu_page *sp; 1144bb18842eSBen Gardon int ret; 1145bb18842eSBen Gardon 114673a3c659SPaolo Bonzini kvm_mmu_hugepage_adjust(vcpu, fault); 1147bb18842eSBen Gardon 1148f0066d94SPaolo Bonzini trace_kvm_mmu_spte_requested(fault); 11497cca2d0bSBen Gardon 11507cca2d0bSBen Gardon rcu_read_lock(); 11517cca2d0bSBen Gardon 11522f6305ddSPaolo Bonzini tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { 115373a3c659SPaolo Bonzini if (fault->nx_huge_page_workaround_enabled) 1154536f0e6aSPaolo Bonzini disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); 1155bb18842eSBen Gardon 115673a3c659SPaolo Bonzini if (iter.level == fault->goal_level) 1157bb18842eSBen Gardon break; 1158bb18842eSBen Gardon 1159bb18842eSBen Gardon /* 1160bb18842eSBen Gardon * If there is an SPTE mapping a large page at a higher level 1161bb18842eSBen Gardon * than the target, that SPTE must be cleared and replaced 1162bb18842eSBen Gardon * with a non-leaf SPTE. 
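 *
 * Worked example (illustrative numbers, not from this file): a 4KiB
 * fault at gfn 0x1005 that finds a present 2MiB SPTE one level above
 * the goal must zap that huge SPTE first; only then can a child page
 * table be linked and a 4KiB leaf installed beneath it, as done below.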
1163bb18842eSBen Gardon */
1164bb18842eSBen Gardon if (is_shadow_present_pte(iter.old_spte) &&
1165bb18842eSBen Gardon is_large_pte(iter.old_spte)) {
11663e72c791SDavid Matlack if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
11679a77daacSBen Gardon break;
1168bb18842eSBen Gardon 
1169bb18842eSBen Gardon /*
1170bb18842eSBen Gardon * The iter must explicitly re-read the spte here
1171bb18842eSBen Gardon * because the new value informs the !present
1172bb18842eSBen Gardon * path below.
1173bb18842eSBen Gardon */
11740e587aa7SSean Christopherson iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
1175bb18842eSBen Gardon }
1176bb18842eSBen Gardon 
1177bb18842eSBen Gardon if (!is_shadow_present_pte(iter.old_spte)) {
11787b7e1ab6SDavid Matlack bool account_nx = fault->huge_page_disallowed &&
11797b7e1ab6SDavid Matlack fault->req_level >= iter.level;
11807b7e1ab6SDavid Matlack 
1181ff76d506SKai Huang /*
1182c4342633SIngo Molnar * If the SPTE has been frozen by another thread, just
1183ff76d506SKai Huang * give up and retry, avoiding unnecessary page table
1184ff76d506SKai Huang * allocation and freeing.
1185ff76d506SKai Huang */
1186ff76d506SKai Huang if (is_removed_spte(iter.old_spte))
1187ff76d506SKai Huang break;
1188ff76d506SKai Huang 
1189a82070b6SDavid Matlack sp = tdp_mmu_alloc_sp(vcpu);
1190a82070b6SDavid Matlack tdp_mmu_init_child_sp(sp, &iter);
1191a82070b6SDavid Matlack 
1192cb00a70bSDavid Matlack if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
11939a77daacSBen Gardon tdp_mmu_free_sp(sp);
11949a77daacSBen Gardon break;
11959a77daacSBen Gardon }
1196bb18842eSBen Gardon }
1197bb18842eSBen Gardon }
1198bb18842eSBen Gardon 
119958298b06SSean Christopherson /*
120058298b06SSean Christopherson * Force the guest to retry the access if the upper level SPTEs aren't
120158298b06SSean Christopherson * in place, or if the target leaf SPTE is frozen by another CPU.
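 *
 * E.g. if the leaf SPTE was frozen to REMOVED_SPTE by a concurrent
 * zap, returning RET_PF_RETRY sends the vCPU back into the guest; the
 * access simply refaults once the winning thread finishes its update
 * (assumed semantics of the retry path, which lives outside this file).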
120258298b06SSean Christopherson */
120358298b06SSean Christopherson if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) {
12047cca2d0bSBen Gardon rcu_read_unlock();
1205bb18842eSBen Gardon return RET_PF_RETRY;
12067cca2d0bSBen Gardon }
1207bb18842eSBen Gardon 
1208cdc47767SPaolo Bonzini ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
12097cca2d0bSBen Gardon rcu_read_unlock();
1210bb18842eSBen Gardon 
1211bb18842eSBen Gardon return ret;
1212bb18842eSBen Gardon }
1213063afacdSBen Gardon 
12143039bcc7SSean Christopherson bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
12153039bcc7SSean Christopherson bool flush)
1216063afacdSBen Gardon {
1217f47e5bbbSSean Christopherson return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
121883b83a02SSean Christopherson range->end, range->may_block, flush);
12193039bcc7SSean Christopherson }
12203039bcc7SSean Christopherson 
12213039bcc7SSean Christopherson typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
12223039bcc7SSean Christopherson struct kvm_gfn_range *range);
12233039bcc7SSean Christopherson 
12243039bcc7SSean Christopherson static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
12253039bcc7SSean Christopherson struct kvm_gfn_range *range,
1226c1b91493SSean Christopherson tdp_handler_t handler)
1227063afacdSBen Gardon {
1228063afacdSBen Gardon struct kvm_mmu_page *root;
12293039bcc7SSean Christopherson struct tdp_iter iter;
12303039bcc7SSean Christopherson bool ret = false;
1231063afacdSBen Gardon 
1232063afacdSBen Gardon /*
1233e1eed584SSean Christopherson * Don't support rescheduling; none of the MMU notifiers that funnel
1234e1eed584SSean Christopherson * into this helper allow blocking, so supporting it would be dead, wasteful code.
1235063afacdSBen Gardon */
12363039bcc7SSean Christopherson for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1237a151acecSSean Christopherson rcu_read_lock();
1238a151acecSSean Christopherson 
12393039bcc7SSean Christopherson tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
12403039bcc7SSean Christopherson ret |= handler(kvm, &iter, range);
1241063afacdSBen Gardon 
12423039bcc7SSean Christopherson rcu_read_unlock();
1243a151acecSSean Christopherson }
1244063afacdSBen Gardon 
1245063afacdSBen Gardon return ret;
1246063afacdSBen Gardon }
1247063afacdSBen Gardon 
1248f8e14497SBen Gardon /*
1249f8e14497SBen Gardon * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return
1250f8e14497SBen Gardon * non-zero if any of the GFNs in the range have been accessed.
1251f8e14497SBen Gardon */
12523039bcc7SSean Christopherson static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
12533039bcc7SSean Christopherson struct kvm_gfn_range *range)
1254f8e14497SBen Gardon {
1255f8e14497SBen Gardon u64 new_spte = 0;
1256f8e14497SBen Gardon 
12573039bcc7SSean Christopherson /* If we have a non-accessed entry we don't need to change the pte. */
12583039bcc7SSean Christopherson if (!is_accessed_spte(iter->old_spte))
12593039bcc7SSean Christopherson return false;
12607cca2d0bSBen Gardon 
12613039bcc7SSean Christopherson new_spte = iter->old_spte;
1262f8e14497SBen Gardon 
1263f8e14497SBen Gardon if (spte_ad_enabled(new_spte)) {
12648f8f52a4SSean Christopherson new_spte &= ~shadow_accessed_mask;
1265f8e14497SBen Gardon } else {
1266f8e14497SBen Gardon /*
1267f8e14497SBen Gardon * Capture the dirty status of the page, so that it doesn't get
1268f8e14497SBen Gardon * lost when the SPTE is marked for access tracking.
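 *
 * Assumed end-to-end behaviour (the restore path lives outside this
 * file): kvm_set_pfn_dirty() records the dirty state below, then
 * mark_spte_for_access_track() hides the SPTE from hardware until a
 * later fault restores it, at which point it counts as accessed again.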
1269f8e14497SBen Gardon */
1270f8e14497SBen Gardon if (is_writable_pte(new_spte))
1271f8e14497SBen Gardon kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1272f8e14497SBen Gardon 
1273f8e14497SBen Gardon new_spte = mark_spte_for_access_track(new_spte);
1274f8e14497SBen Gardon }
1275f8e14497SBen Gardon 
12763039bcc7SSean Christopherson tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
127733dd3574SBen Gardon 
12783039bcc7SSean Christopherson return true;
1279f8e14497SBen Gardon }
1280f8e14497SBen Gardon 
12813039bcc7SSean Christopherson bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1282f8e14497SBen Gardon {
12833039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1284f8e14497SBen Gardon }
1285f8e14497SBen Gardon 
12863039bcc7SSean Christopherson static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
12873039bcc7SSean Christopherson struct kvm_gfn_range *range)
1288f8e14497SBen Gardon {
12893039bcc7SSean Christopherson return is_accessed_spte(iter->old_spte);
1290f8e14497SBen Gardon }
1291f8e14497SBen Gardon 
12923039bcc7SSean Christopherson bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1293f8e14497SBen Gardon {
12943039bcc7SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
12953039bcc7SSean Christopherson }
12963039bcc7SSean Christopherson 
12973039bcc7SSean Christopherson static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
12983039bcc7SSean Christopherson struct kvm_gfn_range *range)
12993039bcc7SSean Christopherson {
13003039bcc7SSean Christopherson u64 new_spte;
13013039bcc7SSean Christopherson 
13023039bcc7SSean Christopherson /* Huge pages aren't expected to be modified without first being zapped. */
13033039bcc7SSean Christopherson WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
13043039bcc7SSean Christopherson 
13053039bcc7SSean Christopherson if (iter->level != PG_LEVEL_4K ||
13063039bcc7SSean Christopherson !is_shadow_present_pte(iter->old_spte))
13073039bcc7SSean Christopherson return false;
13083039bcc7SSean Christopherson 
13093039bcc7SSean Christopherson /*
13103039bcc7SSean Christopherson * Note, when changing a read-only SPTE, it's not strictly necessary to
13113039bcc7SSean Christopherson * zero the SPTE before setting the new PFN, but doing so preserves the
13123039bcc7SSean Christopherson * invariant that the PFN of a present leaf SPTE can never change.
13133039bcc7SSean Christopherson * See __handle_changed_spte().
13143039bcc7SSean Christopherson */
13153039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, 0);
13163039bcc7SSean Christopherson 
13173039bcc7SSean Christopherson if (!pte_write(range->pte)) {
13183039bcc7SSean Christopherson new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
13193039bcc7SSean Christopherson pte_pfn(range->pte));
13203039bcc7SSean Christopherson 
13213039bcc7SSean Christopherson tdp_mmu_set_spte(kvm, iter, new_spte);
13223039bcc7SSean Christopherson }
13233039bcc7SSean Christopherson 
13243039bcc7SSean Christopherson return true;
1325f8e14497SBen Gardon }
13261d8dd6b3SBen Gardon 
13271d8dd6b3SBen Gardon /*
13281d8dd6b3SBen Gardon * Handle the changed_pte MMU notifier for the TDP MMU.
13291d8dd6b3SBen Gardon * range->pte holds the new pte mapping the HVA specified by the MMU
13301d8dd6b3SBen Gardon * notifier.
13311d8dd6b3SBen Gardon * Returns non-zero if a flush is needed before releasing the MMU lock.
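 *
 * Hedged caller sketch (hypothetical; the arch notifier glue in mmu.c is
 * assumed to look roughly like this, and handle_rmap_pte() is a made-up
 * stand-in for the shadow-MMU path):
 *
 *	bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 *	{
 *		bool flush = false;
 *
 *		if (kvm_memslots_have_rmaps(kvm))
 *			flush = handle_rmap_pte(kvm, range);
 *		if (is_tdp_mmu_enabled(kvm))
 *			flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
 *		return flush;
 *	}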
13321d8dd6b3SBen Gardon */ 13333039bcc7SSean Christopherson bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 13341d8dd6b3SBen Gardon { 133593fa50f6SSean Christopherson /* 133693fa50f6SSean Christopherson * No need to handle the remote TLB flush under RCU protection, the 133793fa50f6SSean Christopherson * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a 133893fa50f6SSean Christopherson * shadow page. See the WARN on pfn_changed in __handle_changed_spte(). 133993fa50f6SSean Christopherson */ 134093fa50f6SSean Christopherson return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); 13411d8dd6b3SBen Gardon } 13421d8dd6b3SBen Gardon 1343a6a0b05dSBen Gardon /* 1344bedd9195SDavid Matlack * Remove write access from all SPTEs at or above min_level that map GFNs 1345bedd9195SDavid Matlack * [start, end). Returns true if an SPTE has been changed and the TLBs need to 1346bedd9195SDavid Matlack * be flushed. 1347a6a0b05dSBen Gardon */ 1348a6a0b05dSBen Gardon static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1349a6a0b05dSBen Gardon gfn_t start, gfn_t end, int min_level) 1350a6a0b05dSBen Gardon { 1351a6a0b05dSBen Gardon struct tdp_iter iter; 1352a6a0b05dSBen Gardon u64 new_spte; 1353a6a0b05dSBen Gardon bool spte_set = false; 1354a6a0b05dSBen Gardon 13557cca2d0bSBen Gardon rcu_read_lock(); 13567cca2d0bSBen Gardon 1357a6a0b05dSBen Gardon BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 1358a6a0b05dSBen Gardon 135977aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, start, end) { 136024ae4cfaSBen Gardon retry: 136124ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 13621af4a960SBen Gardon continue; 13631af4a960SBen Gardon 1364a6a0b05dSBen Gardon if (!is_shadow_present_pte(iter.old_spte) || 13650f99ee2cSBen Gardon !is_last_spte(iter.old_spte, iter.level) || 13660f99ee2cSBen Gardon !(iter.old_spte & PT_WRITABLE_MASK)) 1367a6a0b05dSBen Gardon continue; 1368a6a0b05dSBen Gardon 1369a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1370a6a0b05dSBen Gardon 13713e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 137224ae4cfaSBen Gardon goto retry; 13733255530aSDavid Matlack 1374a6a0b05dSBen Gardon spte_set = true; 1375a6a0b05dSBen Gardon } 13767cca2d0bSBen Gardon 13777cca2d0bSBen Gardon rcu_read_unlock(); 1378a6a0b05dSBen Gardon return spte_set; 1379a6a0b05dSBen Gardon } 1380a6a0b05dSBen Gardon 1381a6a0b05dSBen Gardon /* 1382a6a0b05dSBen Gardon * Remove write access from all the SPTEs mapping GFNs in the memslot. Will 1383a6a0b05dSBen Gardon * only affect leaf SPTEs down to min_level. 1384a6a0b05dSBen Gardon * Returns true if an SPTE has been changed and the TLBs need to be flushed. 
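 *
 * Hedged usage sketch (hypothetical caller; assumes the memslot
 * write-protection glue follows this pattern):
 *
 *	bool flush = false;
 *
 *	read_lock(&kvm->mmu_lock);
 *	if (is_tdp_mmu_enabled(kvm))
 *		flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
 *	read_unlock(&kvm->mmu_lock);
 *	if (flush)
 *		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);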
1385a6a0b05dSBen Gardon */ 1386269e9552SHamza Mahfooz bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, 1387269e9552SHamza Mahfooz const struct kvm_memory_slot *slot, int min_level) 1388a6a0b05dSBen Gardon { 1389a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1390a6a0b05dSBen Gardon bool spte_set = false; 1391a6a0b05dSBen Gardon 139224ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1393a6a0b05dSBen Gardon 1394d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1395a6a0b05dSBen Gardon spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, 1396a6a0b05dSBen Gardon slot->base_gfn + slot->npages, min_level); 1397a6a0b05dSBen Gardon 1398a6a0b05dSBen Gardon return spte_set; 1399a6a0b05dSBen Gardon } 1400a6a0b05dSBen Gardon 1401a3fe5dbdSDavid Matlack static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) 1402a3fe5dbdSDavid Matlack { 1403a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1404a3fe5dbdSDavid Matlack 1405a3fe5dbdSDavid Matlack gfp |= __GFP_ZERO; 1406a3fe5dbdSDavid Matlack 1407a3fe5dbdSDavid Matlack sp = kmem_cache_alloc(mmu_page_header_cache, gfp); 1408a3fe5dbdSDavid Matlack if (!sp) 1409a3fe5dbdSDavid Matlack return NULL; 1410a3fe5dbdSDavid Matlack 1411a3fe5dbdSDavid Matlack sp->spt = (void *)__get_free_page(gfp); 1412a3fe5dbdSDavid Matlack if (!sp->spt) { 1413a3fe5dbdSDavid Matlack kmem_cache_free(mmu_page_header_cache, sp); 1414a3fe5dbdSDavid Matlack return NULL; 1415a3fe5dbdSDavid Matlack } 1416a3fe5dbdSDavid Matlack 1417a3fe5dbdSDavid Matlack return sp; 1418a3fe5dbdSDavid Matlack } 1419a3fe5dbdSDavid Matlack 1420a3fe5dbdSDavid Matlack static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, 1421cb00a70bSDavid Matlack struct tdp_iter *iter, 1422cb00a70bSDavid Matlack bool shared) 1423a3fe5dbdSDavid Matlack { 1424a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp; 1425a3fe5dbdSDavid Matlack 1426a3fe5dbdSDavid Matlack /* 1427a3fe5dbdSDavid Matlack * Since we are allocating while under the MMU lock we have to be 1428a3fe5dbdSDavid Matlack * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct 1429a3fe5dbdSDavid Matlack * reclaim and to avoid making any filesystem callbacks (which can end 1430a3fe5dbdSDavid Matlack * up invoking KVM MMU notifiers, resulting in a deadlock). 1431a3fe5dbdSDavid Matlack * 1432a3fe5dbdSDavid Matlack * If this allocation fails we drop the lock and retry with reclaim 1433a3fe5dbdSDavid Matlack * allowed. 
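 *
 * Concretely, the fast attempt below is
 *
 *	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
 *
 * and the GFP_KERNEL_ACCOUNT fallback runs only after the RCU read lock
 * and mmu_lock have been dropped, with iter->yielded set so the caller
 * knows to restart its walk.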
1434a3fe5dbdSDavid Matlack */
1435a3fe5dbdSDavid Matlack sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1436a3fe5dbdSDavid Matlack if (sp)
1437a3fe5dbdSDavid Matlack return sp;
1438a3fe5dbdSDavid Matlack 
1439a3fe5dbdSDavid Matlack rcu_read_unlock();
1440cb00a70bSDavid Matlack 
1441cb00a70bSDavid Matlack if (shared)
1442a3fe5dbdSDavid Matlack read_unlock(&kvm->mmu_lock);
1443cb00a70bSDavid Matlack else
1444cb00a70bSDavid Matlack write_unlock(&kvm->mmu_lock);
1445a3fe5dbdSDavid Matlack 
1446a3fe5dbdSDavid Matlack iter->yielded = true;
1447a3fe5dbdSDavid Matlack sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1448a3fe5dbdSDavid Matlack 
1449cb00a70bSDavid Matlack if (shared)
1450a3fe5dbdSDavid Matlack read_lock(&kvm->mmu_lock);
1451cb00a70bSDavid Matlack else
1452cb00a70bSDavid Matlack write_lock(&kvm->mmu_lock);
1453cb00a70bSDavid Matlack 
1454a3fe5dbdSDavid Matlack rcu_read_lock();
1455a3fe5dbdSDavid Matlack 
1456a3fe5dbdSDavid Matlack return sp;
1457a3fe5dbdSDavid Matlack }
1458a3fe5dbdSDavid Matlack 
1459cb00a70bSDavid Matlack static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1460cb00a70bSDavid Matlack struct kvm_mmu_page *sp, bool shared)
1461a3fe5dbdSDavid Matlack {
1462a3fe5dbdSDavid Matlack const u64 huge_spte = iter->old_spte;
1463a3fe5dbdSDavid Matlack const int level = iter->level;
1464a3fe5dbdSDavid Matlack int ret, i;
1465a3fe5dbdSDavid Matlack 
1466a3fe5dbdSDavid Matlack tdp_mmu_init_child_sp(sp, iter);
1467a3fe5dbdSDavid Matlack 
1468a3fe5dbdSDavid Matlack /*
1469a3fe5dbdSDavid Matlack * No need for atomics when writing to sp->spt since the page table has
1470a3fe5dbdSDavid Matlack * not been linked in yet and thus is not reachable from any other CPU.
1471a3fe5dbdSDavid Matlack */
1472a3fe5dbdSDavid Matlack for (i = 0; i < PT64_ENT_PER_PAGE; i++)
1473a3fe5dbdSDavid Matlack sp->spt[i] = make_huge_page_split_spte(huge_spte, level, i);
1474a3fe5dbdSDavid Matlack 
1475a3fe5dbdSDavid Matlack /*
1476a3fe5dbdSDavid Matlack * Replace the huge spte with a pointer to the populated lower level
1477a3fe5dbdSDavid Matlack * page table. Since we are making this change without a TLB flush, vCPUs
1478a3fe5dbdSDavid Matlack * will see a mix of the split mappings and the original huge mapping,
1479a3fe5dbdSDavid Matlack * depending on what's currently in their TLB. This is fine from a
1480a3fe5dbdSDavid Matlack * correctness standpoint since the translation will be the same either
1481a3fe5dbdSDavid Matlack * way.
1482a3fe5dbdSDavid Matlack */
1483cb00a70bSDavid Matlack ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
1484a3fe5dbdSDavid Matlack if (ret)
1485e0b728b1SDavid Matlack goto out;
1486a3fe5dbdSDavid Matlack 
1487a3fe5dbdSDavid Matlack /*
1488a3fe5dbdSDavid Matlack * tdp_mmu_link_sp() will handle subtracting the huge page we
1489a3fe5dbdSDavid Matlack * are overwriting from the page stats. But we have to manually update
1490a3fe5dbdSDavid Matlack * the page stats with the new present child pages.
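 *
 * E.g. when splitting a 2MiB SPTE, the call below becomes
 *
 *	kvm_update_page_stats(kvm, PG_LEVEL_4K, PT64_ENT_PER_PAGE);
 *
 * i.e. 512 new 4KiB mappings are accounted, while the one huge mapping
 * being replaced was subtracted when tdp_mmu_link_sp() overwrote it.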
1491a3fe5dbdSDavid Matlack */ 1492a3fe5dbdSDavid Matlack kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE); 1493a3fe5dbdSDavid Matlack 1494e0b728b1SDavid Matlack out: 1495e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); 1496e0b728b1SDavid Matlack return ret; 1497a3fe5dbdSDavid Matlack } 1498a3fe5dbdSDavid Matlack 1499a3fe5dbdSDavid Matlack static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, 1500a3fe5dbdSDavid Matlack struct kvm_mmu_page *root, 1501a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1502cb00a70bSDavid Matlack int target_level, bool shared) 1503a3fe5dbdSDavid Matlack { 1504a3fe5dbdSDavid Matlack struct kvm_mmu_page *sp = NULL; 1505a3fe5dbdSDavid Matlack struct tdp_iter iter; 1506a3fe5dbdSDavid Matlack int ret = 0; 1507a3fe5dbdSDavid Matlack 1508a3fe5dbdSDavid Matlack rcu_read_lock(); 1509a3fe5dbdSDavid Matlack 1510a3fe5dbdSDavid Matlack /* 1511a3fe5dbdSDavid Matlack * Traverse the page table splitting all huge pages above the target 1512a3fe5dbdSDavid Matlack * level into one lower level. For example, if we encounter a 1GB page 1513a3fe5dbdSDavid Matlack * we split it into 512 2MB pages. 1514a3fe5dbdSDavid Matlack * 1515a3fe5dbdSDavid Matlack * Since the TDP iterator uses a pre-order traversal, we are guaranteed 1516a3fe5dbdSDavid Matlack * to visit an SPTE before ever visiting its children, which means we 1517a3fe5dbdSDavid Matlack * will correctly recursively split huge pages that are more than one 1518a3fe5dbdSDavid Matlack * level above the target level (e.g. splitting a 1GB to 512 2MB pages, 1519a3fe5dbdSDavid Matlack * and then splitting each of those to 512 4KB pages). 1520a3fe5dbdSDavid Matlack */ 1521a3fe5dbdSDavid Matlack for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { 1522a3fe5dbdSDavid Matlack retry: 1523cb00a70bSDavid Matlack if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) 1524a3fe5dbdSDavid Matlack continue; 1525a3fe5dbdSDavid Matlack 1526a3fe5dbdSDavid Matlack if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) 1527a3fe5dbdSDavid Matlack continue; 1528a3fe5dbdSDavid Matlack 1529a3fe5dbdSDavid Matlack if (!sp) { 1530cb00a70bSDavid Matlack sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); 1531a3fe5dbdSDavid Matlack if (!sp) { 1532a3fe5dbdSDavid Matlack ret = -ENOMEM; 1533e0b728b1SDavid Matlack trace_kvm_mmu_split_huge_page(iter.gfn, 1534e0b728b1SDavid Matlack iter.old_spte, 1535e0b728b1SDavid Matlack iter.level, ret); 1536a3fe5dbdSDavid Matlack break; 1537a3fe5dbdSDavid Matlack } 1538a3fe5dbdSDavid Matlack 1539a3fe5dbdSDavid Matlack if (iter.yielded) 1540a3fe5dbdSDavid Matlack continue; 1541a3fe5dbdSDavid Matlack } 1542a3fe5dbdSDavid Matlack 1543cb00a70bSDavid Matlack if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) 1544a3fe5dbdSDavid Matlack goto retry; 1545a3fe5dbdSDavid Matlack 1546a3fe5dbdSDavid Matlack sp = NULL; 1547a3fe5dbdSDavid Matlack } 1548a3fe5dbdSDavid Matlack 1549a3fe5dbdSDavid Matlack rcu_read_unlock(); 1550a3fe5dbdSDavid Matlack 1551a3fe5dbdSDavid Matlack /* 1552a3fe5dbdSDavid Matlack * It's possible to exit the loop having never used the last sp if, for 1553a3fe5dbdSDavid Matlack * example, a vCPU doing HugePage NX splitting wins the race and 1554a3fe5dbdSDavid Matlack * installs its own sp in place of the last sp we tried to split. 
1555a3fe5dbdSDavid Matlack */ 1556a3fe5dbdSDavid Matlack if (sp) 1557a3fe5dbdSDavid Matlack tdp_mmu_free_sp(sp); 1558a3fe5dbdSDavid Matlack 1559a3fe5dbdSDavid Matlack return ret; 1560a3fe5dbdSDavid Matlack } 1561a3fe5dbdSDavid Matlack 1562cb00a70bSDavid Matlack 1563a3fe5dbdSDavid Matlack /* 1564a3fe5dbdSDavid Matlack * Try to split all huge pages mapped by the TDP MMU down to the target level. 1565a3fe5dbdSDavid Matlack */ 1566a3fe5dbdSDavid Matlack void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, 1567a3fe5dbdSDavid Matlack const struct kvm_memory_slot *slot, 1568a3fe5dbdSDavid Matlack gfn_t start, gfn_t end, 1569cb00a70bSDavid Matlack int target_level, bool shared) 1570a3fe5dbdSDavid Matlack { 1571a3fe5dbdSDavid Matlack struct kvm_mmu_page *root; 1572a3fe5dbdSDavid Matlack int r = 0; 1573a3fe5dbdSDavid Matlack 1574cb00a70bSDavid Matlack kvm_lockdep_assert_mmu_lock_held(kvm, shared); 1575a3fe5dbdSDavid Matlack 15767c554d8eSPaolo Bonzini for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { 1577cb00a70bSDavid Matlack r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); 1578a3fe5dbdSDavid Matlack if (r) { 1579cb00a70bSDavid Matlack kvm_tdp_mmu_put_root(kvm, root, shared); 1580a3fe5dbdSDavid Matlack break; 1581a3fe5dbdSDavid Matlack } 1582a3fe5dbdSDavid Matlack } 1583a3fe5dbdSDavid Matlack } 1584a3fe5dbdSDavid Matlack 1585a6a0b05dSBen Gardon /* 1586a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1587a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1588a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1589a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1590a6a0b05dSBen Gardon * be flushed. 
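 *
 * Minimal sketch of the per-SPTE decision made below (names from this
 * file; the real loop also skips non-present SPTEs and may yield):
 *
 *	if (spte_ad_need_write_protect(spte))
 *		new_spte = spte & ~PT_WRITABLE_MASK;
 *	else
 *		new_spte = spte & ~shadow_dirty_mask;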
1591a6a0b05dSBen Gardon */ 1592a6a0b05dSBen Gardon static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 1593a6a0b05dSBen Gardon gfn_t start, gfn_t end) 1594a6a0b05dSBen Gardon { 1595a6a0b05dSBen Gardon struct tdp_iter iter; 1596a6a0b05dSBen Gardon u64 new_spte; 1597a6a0b05dSBen Gardon bool spte_set = false; 1598a6a0b05dSBen Gardon 15997cca2d0bSBen Gardon rcu_read_lock(); 16007cca2d0bSBen Gardon 1601a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, start, end) { 160224ae4cfaSBen Gardon retry: 160324ae4cfaSBen Gardon if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) 16041af4a960SBen Gardon continue; 16051af4a960SBen Gardon 16063354ef5aSSean Christopherson if (!is_shadow_present_pte(iter.old_spte)) 16073354ef5aSSean Christopherson continue; 16083354ef5aSSean Christopherson 1609a6a0b05dSBen Gardon if (spte_ad_need_write_protect(iter.old_spte)) { 1610a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1611a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1612a6a0b05dSBen Gardon else 1613a6a0b05dSBen Gardon continue; 1614a6a0b05dSBen Gardon } else { 1615a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1616a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1617a6a0b05dSBen Gardon else 1618a6a0b05dSBen Gardon continue; 1619a6a0b05dSBen Gardon } 1620a6a0b05dSBen Gardon 16213e72c791SDavid Matlack if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) 162224ae4cfaSBen Gardon goto retry; 16233255530aSDavid Matlack 1624a6a0b05dSBen Gardon spte_set = true; 1625a6a0b05dSBen Gardon } 16267cca2d0bSBen Gardon 16277cca2d0bSBen Gardon rcu_read_unlock(); 1628a6a0b05dSBen Gardon return spte_set; 1629a6a0b05dSBen Gardon } 1630a6a0b05dSBen Gardon 1631a6a0b05dSBen Gardon /* 1632a6a0b05dSBen Gardon * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If 1633a6a0b05dSBen Gardon * AD bits are enabled, this will involve clearing the dirty bit on each SPTE. 1634a6a0b05dSBen Gardon * If AD bits are not enabled, this will require clearing the writable bit on 1635a6a0b05dSBen Gardon * each SPTE. Returns true if an SPTE has been changed and the TLBs need to 1636a6a0b05dSBen Gardon * be flushed. 1637a6a0b05dSBen Gardon */ 1638269e9552SHamza Mahfooz bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, 1639269e9552SHamza Mahfooz const struct kvm_memory_slot *slot) 1640a6a0b05dSBen Gardon { 1641a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1642a6a0b05dSBen Gardon bool spte_set = false; 1643a6a0b05dSBen Gardon 164424ae4cfaSBen Gardon lockdep_assert_held_read(&kvm->mmu_lock); 1645a6a0b05dSBen Gardon 1646d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) 1647a6a0b05dSBen Gardon spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, 1648a6a0b05dSBen Gardon slot->base_gfn + slot->npages); 1649a6a0b05dSBen Gardon 1650a6a0b05dSBen Gardon return spte_set; 1651a6a0b05dSBen Gardon } 1652a6a0b05dSBen Gardon 1653a6a0b05dSBen Gardon /* 1654a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1655a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1656a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1657a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1658a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 
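 *
 * Worked example (illustrative values only): with gfn == 0x1000 and
 * mask == 0x5, only the 4KiB SPTEs mapping gfns 0x1000 and 0x1002 are
 * candidates. Each visited bit is cleared from mask, and the walk stops
 * early once mask reaches zero.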
1659a6a0b05dSBen Gardon */ 1660a6a0b05dSBen Gardon static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, 1661a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, bool wrprot) 1662a6a0b05dSBen Gardon { 1663a6a0b05dSBen Gardon struct tdp_iter iter; 1664a6a0b05dSBen Gardon u64 new_spte; 1665a6a0b05dSBen Gardon 16667cca2d0bSBen Gardon rcu_read_lock(); 16677cca2d0bSBen Gardon 1668a6a0b05dSBen Gardon tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), 1669a6a0b05dSBen Gardon gfn + BITS_PER_LONG) { 1670a6a0b05dSBen Gardon if (!mask) 1671a6a0b05dSBen Gardon break; 1672a6a0b05dSBen Gardon 1673a6a0b05dSBen Gardon if (iter.level > PG_LEVEL_4K || 1674a6a0b05dSBen Gardon !(mask & (1UL << (iter.gfn - gfn)))) 1675a6a0b05dSBen Gardon continue; 1676a6a0b05dSBen Gardon 1677f1b3b06aSBen Gardon mask &= ~(1UL << (iter.gfn - gfn)); 1678f1b3b06aSBen Gardon 1679a6a0b05dSBen Gardon if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { 1680a6a0b05dSBen Gardon if (is_writable_pte(iter.old_spte)) 1681a6a0b05dSBen Gardon new_spte = iter.old_spte & ~PT_WRITABLE_MASK; 1682a6a0b05dSBen Gardon else 1683a6a0b05dSBen Gardon continue; 1684a6a0b05dSBen Gardon } else { 1685a6a0b05dSBen Gardon if (iter.old_spte & shadow_dirty_mask) 1686a6a0b05dSBen Gardon new_spte = iter.old_spte & ~shadow_dirty_mask; 1687a6a0b05dSBen Gardon else 1688a6a0b05dSBen Gardon continue; 1689a6a0b05dSBen Gardon } 1690a6a0b05dSBen Gardon 1691a6a0b05dSBen Gardon tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); 1692a6a0b05dSBen Gardon } 16937cca2d0bSBen Gardon 16947cca2d0bSBen Gardon rcu_read_unlock(); 1695a6a0b05dSBen Gardon } 1696a6a0b05dSBen Gardon 1697a6a0b05dSBen Gardon /* 1698a6a0b05dSBen Gardon * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is 1699a6a0b05dSBen Gardon * set in mask, starting at gfn. The given memslot is expected to contain all 1700a6a0b05dSBen Gardon * the GFNs represented by set bits in the mask. If AD bits are enabled, 1701a6a0b05dSBen Gardon * clearing the dirty status will involve clearing the dirty bit on each SPTE 1702a6a0b05dSBen Gardon * or, if AD bits are not enabled, clearing the writable bit on each SPTE. 1703a6a0b05dSBen Gardon */ 1704a6a0b05dSBen Gardon void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, 1705a6a0b05dSBen Gardon struct kvm_memory_slot *slot, 1706a6a0b05dSBen Gardon gfn_t gfn, unsigned long mask, 1707a6a0b05dSBen Gardon bool wrprot) 1708a6a0b05dSBen Gardon { 1709a6a0b05dSBen Gardon struct kvm_mmu_page *root; 1710a6a0b05dSBen Gardon 1711531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1712a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 1713a6a0b05dSBen Gardon clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); 1714a6a0b05dSBen Gardon } 1715a6a0b05dSBen Gardon 1716a6a0b05dSBen Gardon /* 171787aa9ec9SBen Gardon * Clear leaf entries which could be replaced by large mappings, for 171887aa9ec9SBen Gardon * GFNs within the slot. 
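 *
 * E.g. (illustrative): after dirty logging is disabled on a slot, a
 * 2MiB-aligned, fully populated run of 512 4KiB leaf SPTEs is zapped
 * here so that the next fault can map the range with a single 2MiB SPTE.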
171914881998SBen Gardon */
17204b85c921SSean Christopherson static void zap_collapsible_spte_range(struct kvm *kvm,
172114881998SBen Gardon struct kvm_mmu_page *root,
17224b85c921SSean Christopherson const struct kvm_memory_slot *slot)
172314881998SBen Gardon {
17249eba50f8SSean Christopherson gfn_t start = slot->base_gfn;
17259eba50f8SSean Christopherson gfn_t end = start + slot->npages;
172614881998SBen Gardon struct tdp_iter iter;
172714881998SBen Gardon kvm_pfn_t pfn;
172814881998SBen Gardon 
17297cca2d0bSBen Gardon rcu_read_lock();
17307cca2d0bSBen Gardon 
173114881998SBen Gardon tdp_root_for_each_pte(iter, root, start, end) {
17322db6f772SBen Gardon retry:
17334b85c921SSean Christopherson if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
17341af4a960SBen Gardon continue;
17351af4a960SBen Gardon 
173614881998SBen Gardon if (!is_shadow_present_pte(iter.old_spte) ||
173787aa9ec9SBen Gardon !is_last_spte(iter.old_spte, iter.level))
173814881998SBen Gardon continue;
173914881998SBen Gardon 
174014881998SBen Gardon pfn = spte_to_pfn(iter.old_spte);
174114881998SBen Gardon if (kvm_is_reserved_pfn(pfn) ||
17429eba50f8SSean Christopherson iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
17439eba50f8SSean Christopherson pfn, PG_LEVEL_NUM))
174414881998SBen Gardon continue;
174514881998SBen Gardon 
17464b85c921SSean Christopherson /* Note, a successful atomic zap also does a remote TLB flush. */
17473e72c791SDavid Matlack if (tdp_mmu_zap_spte_atomic(kvm, &iter))
17482db6f772SBen Gardon goto retry;
17492db6f772SBen Gardon }
175014881998SBen Gardon 
17517cca2d0bSBen Gardon rcu_read_unlock();
175214881998SBen Gardon }
175314881998SBen Gardon 
175414881998SBen Gardon /*
175514881998SBen Gardon * Zap the leaf entries which could be replaced by large mappings, for
175614881998SBen Gardon * GFNs within the slot.
175714881998SBen Gardon */
17584b85c921SSean Christopherson void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
17594b85c921SSean Christopherson const struct kvm_memory_slot *slot)
176014881998SBen Gardon {
176114881998SBen Gardon struct kvm_mmu_page *root;
176214881998SBen Gardon 
17632db6f772SBen Gardon lockdep_assert_held_read(&kvm->mmu_lock);
176414881998SBen Gardon 
1765d62007edSSean Christopherson for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
17664b85c921SSean Christopherson zap_collapsible_spte_range(kvm, root, slot);
176714881998SBen Gardon }
176846044f72SBen Gardon 
176946044f72SBen Gardon /*
177046044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the
17715fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted.
177246044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed.
177346044f72SBen Gardon */ 177446044f72SBen Gardon static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, 17753ad93562SKeqian Zhu gfn_t gfn, int min_level) 177646044f72SBen Gardon { 177746044f72SBen Gardon struct tdp_iter iter; 177846044f72SBen Gardon u64 new_spte; 177946044f72SBen Gardon bool spte_set = false; 178046044f72SBen Gardon 17813ad93562SKeqian Zhu BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); 17823ad93562SKeqian Zhu 17837cca2d0bSBen Gardon rcu_read_lock(); 17847cca2d0bSBen Gardon 178577aa6075SDavid Matlack for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { 17863ad93562SKeqian Zhu if (!is_shadow_present_pte(iter.old_spte) || 17873ad93562SKeqian Zhu !is_last_spte(iter.old_spte, iter.level)) 17883ad93562SKeqian Zhu continue; 17893ad93562SKeqian Zhu 179046044f72SBen Gardon new_spte = iter.old_spte & 17915fc3424fSSean Christopherson ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); 179246044f72SBen Gardon 17937c8a4742SDavid Matlack if (new_spte == iter.old_spte) 17947c8a4742SDavid Matlack break; 17957c8a4742SDavid Matlack 179646044f72SBen Gardon tdp_mmu_set_spte(kvm, &iter, new_spte); 179746044f72SBen Gardon spte_set = true; 179846044f72SBen Gardon } 179946044f72SBen Gardon 18007cca2d0bSBen Gardon rcu_read_unlock(); 18017cca2d0bSBen Gardon 180246044f72SBen Gardon return spte_set; 180346044f72SBen Gardon } 180446044f72SBen Gardon 180546044f72SBen Gardon /* 180646044f72SBen Gardon * Removes write access on the last level SPTE mapping this GFN and unsets the 18075fc3424fSSean Christopherson * MMU-writable bit to ensure future writes continue to be intercepted. 180846044f72SBen Gardon * Returns true if an SPTE was set and a TLB flush is needed. 180946044f72SBen Gardon */ 181046044f72SBen Gardon bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, 18113ad93562SKeqian Zhu struct kvm_memory_slot *slot, gfn_t gfn, 18123ad93562SKeqian Zhu int min_level) 181346044f72SBen Gardon { 181446044f72SBen Gardon struct kvm_mmu_page *root; 181546044f72SBen Gardon bool spte_set = false; 181646044f72SBen Gardon 1817531810caSBen Gardon lockdep_assert_held_write(&kvm->mmu_lock); 1818a3f15bdaSSean Christopherson for_each_tdp_mmu_root(kvm, root, slot->as_id) 18193ad93562SKeqian Zhu spte_set |= write_protect_gfn(kvm, root, gfn, min_level); 1820a3f15bdaSSean Christopherson 182146044f72SBen Gardon return spte_set; 182246044f72SBen Gardon } 182346044f72SBen Gardon 182495fb5b02SBen Gardon /* 182595fb5b02SBen Gardon * Return the level of the lowest level SPTE added to sptes. 182695fb5b02SBen Gardon * That SPTE may be non-present. 1827c5c8c7c5SDavid Matlack * 1828c5c8c7c5SDavid Matlack * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}. 
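 *
 * Hedged usage sketch (hypothetical caller, modelled on the MMIO walk
 * in mmu.c):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * On return, each visited level l has its SPTE value in sptes[l], from
 * root_level down to leaf (or leaf == -1 if nothing was walked).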
182995fb5b02SBen Gardon */
183039b4d43eSSean Christopherson int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
183139b4d43eSSean Christopherson int *root_level)
183295fb5b02SBen Gardon {
183395fb5b02SBen Gardon struct tdp_iter iter;
183495fb5b02SBen Gardon struct kvm_mmu *mmu = vcpu->arch.mmu;
183595fb5b02SBen Gardon gfn_t gfn = addr >> PAGE_SHIFT;
18362aa07893SSean Christopherson int leaf = -1;
183795fb5b02SBen Gardon 
183839b4d43eSSean Christopherson *root_level = vcpu->arch.mmu->shadow_root_level;
183995fb5b02SBen Gardon 
184095fb5b02SBen Gardon tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
184195fb5b02SBen Gardon leaf = iter.level;
1842dde81f94SSean Christopherson sptes[leaf] = iter.old_spte;
184395fb5b02SBen Gardon }
184495fb5b02SBen Gardon 
184595fb5b02SBen Gardon return leaf;
184695fb5b02SBen Gardon }
18476e8eb206SDavid Matlack 
18486e8eb206SDavid Matlack /*
18496e8eb206SDavid Matlack * Returns the last level spte pointer of the shadow page walk for the given
18506e8eb206SDavid Matlack * gpa, and sets *spte to the spte value. This spte may be non-present. If no
18516e8eb206SDavid Matlack * walk could be performed, returns NULL and *spte does not contain valid data.
18526e8eb206SDavid Matlack *
18536e8eb206SDavid Matlack * Contract:
18546e8eb206SDavid Matlack *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
18556e8eb206SDavid Matlack *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
18566e8eb206SDavid Matlack *
18576e8eb206SDavid Matlack * WARNING: This function is only intended to be called during fast_page_fault.
18586e8eb206SDavid Matlack */
18596e8eb206SDavid Matlack u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
18606e8eb206SDavid Matlack u64 *spte)
18616e8eb206SDavid Matlack {
18626e8eb206SDavid Matlack struct tdp_iter iter;
18636e8eb206SDavid Matlack struct kvm_mmu *mmu = vcpu->arch.mmu;
18646e8eb206SDavid Matlack gfn_t gfn = addr >> PAGE_SHIFT;
18656e8eb206SDavid Matlack tdp_ptep_t sptep = NULL;
18666e8eb206SDavid Matlack 
18676e8eb206SDavid Matlack tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
18686e8eb206SDavid Matlack *spte = iter.old_spte;
18696e8eb206SDavid Matlack sptep = iter.sptep;
18706e8eb206SDavid Matlack }
18716e8eb206SDavid Matlack 
18726e8eb206SDavid Matlack /*
18736e8eb206SDavid Matlack * Perform the rcu_dereference to get the raw spte pointer value since
18746e8eb206SDavid Matlack * we are passing it up to fast_page_fault, which is shared with the
18756e8eb206SDavid Matlack * legacy MMU and thus does not retain the TDP MMU-specific __rcu
18766e8eb206SDavid Matlack * annotation.
18776e8eb206SDavid Matlack *
18786e8eb206SDavid Matlack * This is safe since fast_page_fault obeys the contracts of this
18796e8eb206SDavid Matlack * function as well as all TDP MMU contracts around modifying SPTEs
18806e8eb206SDavid Matlack * outside of mmu_lock.
18816e8eb206SDavid Matlack */
18826e8eb206SDavid Matlack return rcu_dereference(sptep);
18836e8eb206SDavid Matlack }
1884