// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

/* Get (allocating if necessary) the vCPU's TDP MMU root and return its HPA. */
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
/* Tear down and free a TDP MMU root whose reference count has dropped to zero. */
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

/* Zap SPTEs in the GFN range [start, end); returns true if a TLB flush is needed. */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

/* Handle a TDP page fault by installing a mapping for @gpa. */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

/* MMU notifier handlers, keyed by host virtual address. */
int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep);

/* Dirty logging and memslot maintenance. */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

/* Record the SPTEs walked for @addr in @sptes and return the leaf level. */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

/* Return true if @hpa is the root of an in-use TDP MMU paging structure. */
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */