// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush,
					     bool shared)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
					   shared);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this
	 * case, but explicitly disallow it for safety.  The TDP MMU does not
	 * yield until it has made forward progress (steps sideways), and when
	 * zapping a single shadow page that it's guaranteed to see (thus the
	 * mmu_lock requirement), its "step sideways" will always step beyond
	 * the bounds of the shadow page's gfn range and stop iterating before
	 * yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false, false);
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */