// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

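/*
 * Root management: kvm_tdp_mmu_get_vcpu_root_hpa() returns the physical
 * address of the vCPU's TDP MMU root, allocating a new root page if one
 * does not already exist; kvm_tdp_mmu_free_root() tears down a root that
 * is no longer referenced.
 */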
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

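/*
 * Zapping: remove the SPTEs covering a GFN range.  The return value tells
 * the caller whether a TLB flush is needed.  __kvm_tdp_mmu_zap_gfn_range()
 * may drop mmu_lock and yield when @can_yield is true; the wrappers below
 * choose the yielding behavior appropriate for their callers.
 */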
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
				 bool can_yield);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
					     gfn_t end)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

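/*
 * Fault handling: map @gpa to @pfn at up to @max_level, creating or updating
 * SPTEs as needed.  Returns a RET_PF_* value for the common page fault path.
 */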
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

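/*
 * MMU notifier hooks, operating on host virtual address ranges: unmap (zap),
 * age, test-age, and change-pte callbacks.
 */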
int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep);

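/*
 * Dirty logging and huge page management for a memslot: write-protect SPTEs
 * down to @min_level, clear dirty state (for the whole slot or only the GFNs
 * set in @mask), and zap SPTEs that block the use of huge pages once dirty
 * logging is disabled.
 */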
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot);

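/*
 * Write-protect the given GFN in the slot; returns true if an SPTE was
 * changed and a TLB flush is needed.
 */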
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

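/*
 * Record the SPTE encountered at each level of the walk for @addr in @sptes
 * and report the root level to the caller.
 */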
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

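/*
 * The TDP MMU is only built for 64-bit hosts; on 32-bit builds the helpers
 * below compile to no-ops so that callers need not be #ifdef'd.
 */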
#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

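/*
 * Returns true if @hpa is the physical address of a live TDP MMU root, i.e.
 * if the backing shadow page was allocated by the TDP MMU and still holds a
 * root reference.
 */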
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */