// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu_internal.h"

/*
 * An MMU present SPTE is backed by actual memory and may or may not be present
 * in hardware.  E.g. MMIO SPTEs are not considered present.  Use bit 11, as it
 * is ignored by all flavors of SPTEs and checking a low bit often generates
 * better code than for a high bit, e.g. 56+.  MMU present checks are pervasive
 * enough that the improved code generation is noticeable in KVM's footprint.
 */
#define SPTE_MMU_PRESENT_MASK		BIT_ULL(11)
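
/*
 * To illustrate the code generation point above (hedged; the exact output
 * is compiler and flag dependent): testing bit 11 fits in a 32-bit
 * immediate, e.g. "testl $0x800, %eax", whereas a mask at bit 56+ needs a
 * 64-bit immediate loaded into a scratch register or a bit-test insn.
 */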

/*
 * TDP SPTES (more specifically, EPT SPTEs) may not have A/D bits, and may also
 * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
 * PML, is enabled).  Use bits 52 and 53 to hold the type of A/D tracking that
 * must be employed for a given TDP SPTE.
 *
 * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE
 * paging, including NPT PAE.  This scheme works because legacy shadow paging
 * is guaranteed to have A/D bits and write-protection is forced only for
 * TDP with CPU dirty logging (PML).  If NPT ever gains PML-like support, it
 * must be restricted to 64-bit KVM.
 */
#define SPTE_TDP_AD_SHIFT		52
#define SPTE_TDP_AD_MASK		(3ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_ENABLED_MASK	(0ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_DISABLED_MASK	(1ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_WRPROT_ONLY_MASK	(2ULL << SPTE_TDP_AD_SHIFT)
static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
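
/*
 * Concretely, the A/D type occupies SPTE bits 53:52: 00b = A/D enabled,
 * 01b = A/D disabled, 10b = write-protect only.  Illustrative sanity
 * checks of that layout (not load-bearing):
 */
static_assert(SPTE_TDP_AD_DISABLED_MASK == BIT_ULL(52));
static_assert(SPTE_TDP_AD_WRPROT_ONLY_MASK == BIT_ULL(53));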

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
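
/*
 * Worked example (assuming 4KiB pages and, in the non-dynamic case, 52
 * physical address bits): PT64_LVL_ADDR_MASK(2) keeps address bits 51:21,
 * i.e. a 2MiB-aligned address, while PT64_LVL_OFFSET_MASK(2) keeps bits
 * 20:12, the 4KiB-frame offset within that 2MiB region.
 */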

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
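
/*
 * For example, level 1 indexes directly above the page offset, and each
 * higher level consumes another 9 bits of the address; an address of
 * exactly one level-2 page lands at index 1 of a level-2 table.
 * Illustrative compile-time checks (not load-bearing):
 */
static_assert(PT64_LEVEL_SHIFT(1) == PAGE_SHIFT);
static_assert(PT64_INDEX(1ULL << PT64_LEVEL_SHIFT(2), 2) == 1);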

/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_HOST_WRITEABLE	BIT_ULL(9)
#define DEFAULT_SPTE_MMU_WRITEABLE	BIT_ULL(10)

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.  This mask obviously
 * must not overlap the A/D type mask.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
					  PT64_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54
#define SHADOW_ACC_TRACK_SAVED_MASK	(SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
					 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
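/*
 * I.e. the saved R bit lands at bit 54 and the saved X bit at bit 56,
 * clear of the A/D type in bits 53:52.  An illustrative check of that
 * arithmetic (not load-bearing):
 */
static_assert(SHADOW_ACC_TRACK_SAVED_MASK == (BIT_ULL(54) | BIT_ULL(56)));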

/*
 * Low ignored bits are at a premium for EPT, use high ignored bits, taking care
 * to not overlap the A/D type mask or the saved access bits of access-tracked
 * SPTEs when A/D bits are disabled.
 */
#define EPT_SPTE_HOST_WRITABLE		BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE		BIT_ULL(58)

static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK

/*
 * Due to limited space in PTEs, the MMIO generation is a 19-bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		10

#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		62

#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)
static_assert(!(SPTE_MMU_PRESENT_MASK &
		(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)

extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_me_mask;

/*
 * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5

/*
 * If a thread running without exclusive control of the MMU lock must perform a
 * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
 * non-present intermediate value. Other threads which encounter this value
 * should not modify the SPTE.
 *
 * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
 * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create an
 * L1TF vulnerability.  Use only low bits to avoid 64-bit immediates.
 *
 * Only used by the TDP MMU.
 */
#define REMOVED_SPTE	0x5a0ULL

/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));

static inline bool is_removed_spte(u64 spte)
{
	return spte == REMOVED_SPTE;
}
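
/*
 * Illustrative sketch of the intended usage (hedged; the real logic lives
 * in tdp_mmu.c and this helper is hypothetical): a lockless walker claims
 * an SPTE for a multi-part update by atomically transitioning it to
 * REMOVED_SPTE, so concurrent walkers see the sentinel and back off.
 * cmpxchg64() is assumed to be available via mmu_internal.h's includes.
 */
static inline bool example_claim_spte_for_removal(u64 *sptep, u64 old_spte)
{
	/* Exactly one thread can win the transition to REMOVED_SPTE. */
	return cmpxchg64(sptep, old_spte, REMOVED_SPTE) == old_spte;
}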

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
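
/*
 * A hedged sketch of how such a split GFN could be reassembled (the
 * in-tree decode helper lives in mmu.c; this example function is
 * hypothetical): take the low GFN bits in place, then pull the displaced
 * high bits back down by SHADOW_NONPRESENT_OR_RSVD_MASK_LEN.
 */
static inline gfn_t example_get_split_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	/* Recover the GFN bits that were shifted into the reserved area. */
	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) &
	       shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}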

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       likely(shadow_mmio_value);
}

static inline bool is_shadow_present_pte(u64 pte)
{
	return !!(pte & SPTE_MMU_PRESENT_MASK);
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	/*
	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is '0',
	 * and non-TDP SPTEs will never set these bits.  Optimize for 64-bit
	 * TDP and do the A/D type check unconditionally.
	 */
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK;
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

static inline bool is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}

static inline bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static inline kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static inline bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static inline bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
				int level)
{
	int bit7 = (pte >> 7) & 1;

	return rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	return pte & get_rsvd_bits(rsvd_check, pte, level);
}

static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
				   u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
					 u64 spte, int level)
{
	/*
	 * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
	 * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
	 * (this is extremely unlikely to be short-circuited as true).
	 */
	return __is_bad_mt_xwr(rsvd_check, spte) |
	       __is_rsvd_bits_set(rsvd_check, spte, level);
}

static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & shadow_host_writable_mask) &&
	       (spte & shadow_mmu_writable_mask);
}

static inline u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}
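
/*
 * For illustration, the inverse of get_mmio_spte_generation() scatters a
 * generation number into the two SPTE bit ranges.  A hedged sketch only;
 * the canonical encode helper lives in spte.c and this example function
 * is hypothetical:
 */
static inline u64 example_mmio_spte_gen_mask(u64 gen)
{
	u64 mask;

	/* Bits 0-7 of the generation land in SPTE bits 3-10 ... */
	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	/* ... and bits 8-18 land in SPTE bits 52-62. */
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}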

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT    BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
#define SET_SPTE_SPURIOUS              BIT(2)

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
		     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
		     bool can_unsync, bool host_writable, bool ad_disabled,
		     u64 *new_spte);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void kvm_mmu_reset_all_pte_masks(void);

#endif