// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

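/*
 * Get the vCPU's TDP MMU root, allocating a fresh root if the vCPU doesn't
 * already have one, and return its host physical address.
 */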
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

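/*
 * Take a reference on @root unless its refcount has already hit zero, in
 * which case the root is in the process of being torn down and must not be
 * used.  Returns true if the reference was acquired.
 */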
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

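/*
 * Drop a reference on @root.  @shared indicates whether mmu_lock is held
 * for read (shared) as opposed to write; dropping the last reference
 * triggers teardown of the root.
 */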
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

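/*
 * Zap the SPTEs covering the GFN range [start, end) in address space
 * @as_id.  The inline wrapper always allows yielding (can_yield == true);
 * callers that cannot tolerate dropping mmu_lock mid-zap must call
 * __kvm_tdp_mmu_zap_gfn_range() directly.  Returns true if a TLB flush is
 * required, or if @flush was already true on entry.
 */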
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}

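/*
 * Zap a single shadow page, zap all SPTEs in the VM, or mark every root
 * invalid so that vCPUs reload fresh roots while the stale roots are torn
 * down asynchronously via kvm_tdp_mmu_zap_invalidated_roots().
 */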
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

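/* Handle a TDP page fault by installing the SPTEs needed to map @fault. */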
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

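/*
 * kvm_gfn_range-based hooks backing the MMU notifiers: unmap a range of
 * GFNs, clear the accessed bits on a range, test a range for recent
 * access, and update SPTEs when the corresponding host PTE changes.
 */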
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

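/*
 * Dirty-logging operations on a memslot: write-protect SPTEs at or above
 * @min_level, clear dirty bits slot-wide, clear (or write-protect, when
 * @wrprot is true) the SPTEs for the GFNs set in @mask, and zap SPTEs that
 * could be collapsed back into huge pages once dirty logging is disabled.
 */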
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

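/*
 * Write-protect the SPTE(s) mapping @gfn in @slot at level @min_level and
 * above.  Returns true if an SPTE was changed and a TLB flush is needed.
 */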
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

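/*
 * Try to split the huge pages mapping GFNs in [start, end) down to
 * @target_level, e.g. to avoid write-protection faults once dirty logging
 * is enabled.  @shared indicates whether mmu_lock is held for read.
 */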
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

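/*
 * kvm_tdp_mmu_walk_lockless_begin/end() bracket a lockless walk of the TDP
 * page tables: the walk is protected by RCU rather than mmu_lock, which
 * works because removed paging-structure pages are freed via an RCU
 * callback.  A minimal usage sketch (illustrative, not a real caller):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */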
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

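/*
 * Lockless accessors for the fast page fault path; both must be called
 * between kvm_tdp_mmu_walk_lockless_begin() and _end().
 */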
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

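/*
 * The TDP MMU is only supported on 64-bit hosts; on 32-bit builds these
 * stubs compile the TDP MMU paths away.
 */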
#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */