/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

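/*
 * Illustrative use of the debug macros above (a sketch, not a call site
 * from this file): pgprintk() takes plain printk()-style arguments, while
 * rmap_printk() prepends the calling function's name.  With MMU_DEBUG
 * undefined, as forced by the #undef above, all three compile to nothing.
 *
 *	pgprintk("%s: setting spte %llx\n", __func__, spte);
 *	rmap_printk("rmap_head %p\n", rmap_head);
 */
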
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equivalent large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* Holds the gfn of each SPTE inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

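/*
 * A minimal lookup sketch (assuming the hash helpers that live in mmu.c):
 * shadow pages are hashed by gfn, then disambiguated by comparing both of
 * the keys named in the struct comment above:
 *
 *	hlist_for_each_entry(sp, &kvm->arch.mmu_page_hash[hash], hash_link)
 *		if (sp->gfn == gfn && sp->role.word == role.word)
 *			return sp;
 */
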
extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

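/*
 * Illustrative round trip: the page-private link consumed above is set up
 * when the shadow page is allocated (in mmu.c), so the owning shadow page
 * can be recovered from any SPTE pointer within its page table:
 *
 *	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 *	...
 *	WARN_ON(sptep_to_sp(sp->spt) != sp);
 */
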
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses PML,
	 * since writes now result in a vmexit.  Note, the check on CPU dirty
	 * logging being enabled is mandatory as the bits used to denote
	 * WP-only SPTEs are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}

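/*
 * Roughly how a caller uses the helper above when building an SPTE (a
 * simplified sketch of the logic in spte.c, not a verbatim excerpt):
 *
 *	if (sp->role.ad_disabled)
 *		spte |= SPTE_AD_DISABLED_MASK;
 *	else if (kvm_vcpu_ad_need_write_protect(vcpu))
 *		spte |= SPTE_AD_WRPROT_ONLY_MASK;
 */
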
bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

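/*
 * Typical flush call (a sketch): after updating SPTEs for a mapping at
 * @level, flush the whole gfn range that the mapping covers:
 *
 *	kvm_flush_remote_tlbs_with_address(kvm, gfn,
 *					   KVM_PAGES_PER_HPAGE(level));
 */
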
static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}

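/*
 * kvm_mmu_put_root() returns true when the last reference is dropped,
 * signaling the caller to free the root; a simplified sketch of the
 * teardown pattern in mmu.c:
 *
 *	if (kvm_mmu_put_root(kvm, sp)) {
 *		if (sp->tdp_mmu_page)
 *			kvm_tdp_mmu_free_root(kvm, sp);
 *		else if (sp->role.invalid)
 *			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 *	}
 */
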
/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
 * RET_PF_INVALID: the SPTE is invalid, let the real page fault path update it.
 * RET_PF_FIXED: the faulting entry has been fixed.
 * RET_PF_SPURIOUS: the faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

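/*
 * Simplified sketch of the dispatch in kvm_mmu_page_fault() (mmu.c):
 * RET_PF_INVALID sends the fault down the real page-fault path, anything
 * other than RET_PF_EMULATE resumes the guest, and RET_PF_EMULATE falls
 * through to the emulator:
 *
 *	if (r == RET_PF_INVALID)
 *		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, ...);
 *	if (r != RET_PF_EMULATE)
 *		return 1;
 */
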
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)

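/*
 * Callers test these bits individually, since set_spte() can return several
 * at once; roughly what mmu_set_spte() in mmu.c does with the result:
 *
 *	set_spte_ret = set_spte(...);
 *	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT)
 *		ret = RET_PF_EMULATE;
 *	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
 *						   KVM_PAGES_PER_HPAGE(level));
 */
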
int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

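/*
 * Sketch of how the fault paths pair these helpers (simplified from
 * __direct_map() in mmu.c): pick a target level up front, then demote it
 * while walking if an NX-huge-page mitigation SPTE is in the way:
 *
 *	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
 *					huge_page_disallowed, &req_level);
 *	for_each_shadow_entry(vcpu, gpa, it) {
 *		if (nx_huge_page_workaround_enabled)
 *			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
 *						   &pfn, &level);
 *		...
 *	}
 */
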
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */