// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu_internal.h"

/*
 * An MMU-present SPTE is backed by actual memory and may or may not be present
 * in hardware.  E.g. MMIO SPTEs are not considered present.  Use bit 11, as it
 * is ignored by all flavors of SPTEs and checking a low bit often generates
 * better code than checking a high bit, e.g. 56+.  MMU-present checks are
 * pervasive enough that the improved code generation is noticeable in KVM's
 * footprint.
 */
#define SPTE_MMU_PRESENT_MASK		BIT_ULL(11)

/*
 * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may also
 * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
 * PML, is enabled).  Use bits 52 and 53 to hold the type of A/D tracking that
 * must be employed for a given TDP SPTE.
 *
 * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE
 * paging, including NPT PAE.  This scheme works because legacy shadow paging
 * is guaranteed to have A/D bits and write-protection is forced only for
 * TDP with CPU dirty logging (PML).  If NPT ever gains PML-like support, it
 * must be restricted to 64-bit KVM.
 */
#define SPTE_TDP_AD_SHIFT		52
#define SPTE_TDP_AD_MASK		(3ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_ENABLED_MASK	(0ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_DISABLED_MASK	(1ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_WRPROT_ONLY_MASK	(2ULL << SPTE_TDP_AD_SHIFT)
static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);

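/*
 * Minimal sketch (illustrative, not part of this header's API) of how the
 * A/D type above is chosen when constructing an SPTE, loosely mirroring
 * make_spte() in spte.c.  Assumes kvm_mmu_page_ad_need_write_protect()
 * from mmu_internal.h.
 */
static inline u64 __example_spte_ad_type(struct kvm_mmu_page *sp)
{
	if (sp->role.ad_disabled)
		return SPTE_TDP_AD_DISABLED_MASK;
	if (kvm_mmu_page_ad_need_write_protect(sp))
		return SPTE_TDP_AD_WRPROT_ONLY_MASK;
	return SPTE_TDP_AD_ENABLED_MASK;	/* '0', see the note above */
}
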
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

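/*
 * Worked example (illustrative): GPA 0x40201000 selects entry 0 of the
 * level-4 table (bits 47:39), entry 1 at level 3 (bits 38:30), entry 1 at
 * level 2 (bits 29:21), and entry 1 of the level-1 page table (bits 20:12).
 */
static_assert(PT64_INDEX(0x40201000ULL, 4) == 0);
static_assert(PT64_INDEX(0x40201000ULL, 3) == 1);
static_assert(PT64_INDEX(0x40201000ULL, 2) == 1);
static_assert(PT64_INDEX(0x40201000ULL, 1) == 1);
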
/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will
 * be restored only when a write is attempted to the page.  This mask must not
 * overlap the A/D type mask.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
					  PT64_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54
#define SHADOW_ACC_TRACK_SAVED_MASK	(SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
					 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));

/*
 * *_SPTE_HOST_WRITEABLE (aka Host-writable) indicates whether the host permits
 * writes to the guest page mapped by the SPTE. This bit is cleared on SPTEs
 * that map guest pages in read-only memslots and read-only VMAs.
 *
 * Invariants:
 *  - If Host-writable is clear, PT_WRITABLE_MASK must be clear.
 *
 * *_SPTE_MMU_WRITEABLE (aka MMU-writable) indicates whether the shadow MMU
 * allows writes to the guest page mapped by the SPTE. This bit is cleared when
 * the guest page mapped by the SPTE contains a page table that is being
 * monitored for shadow paging. In this case the SPTE can only be made writable
 * by unsyncing the shadow page under the mmu_lock.
 *
 * Invariants:
 *  - If MMU-writable is clear, PT_WRITABLE_MASK must be clear.
 *  - If MMU-writable is set, Host-writable must be set.
 *
 * If MMU-writable is set, PT_WRITABLE_MASK is normally set but can be cleared
 * to track writes for dirty logging. For such SPTEs, KVM will locklessly set
 * PT_WRITABLE_MASK upon the next write from the guest and record the write in
 * the dirty log (see fast_page_fault()).
 */

/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_HOST_WRITEABLE	BIT_ULL(9)
#define DEFAULT_SPTE_MMU_WRITEABLE	BIT_ULL(10)

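/*
 * Illustrative check of the invariants above for a non-EPT SPTE, using the
 * default bit positions.  A sketch only; KVM enforces these invariants at
 * the points where SPTEs are modified rather than via a helper like this.
 */
static inline bool __example_default_writable_invariants_hold(u64 spte)
{
	/* MMU-writable implies Host-writable. */
	if ((spte & DEFAULT_SPTE_MMU_WRITEABLE) &&
	    !(spte & DEFAULT_SPTE_HOST_WRITEABLE))
		return false;

	/* PT_WRITABLE_MASK implies MMU-writable (and thus Host-writable). */
	if ((spte & PT_WRITABLE_MASK) && !(spte & DEFAULT_SPTE_MMU_WRITEABLE))
		return false;

	return true;
}
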
/*
 * Low ignored bits are at a premium for EPT, use high ignored bits, taking care
 * to not overlap the A/D type mask or the saved access bits of access-tracked
 * SPTEs when A/D bits are disabled.
 */
#define EPT_SPTE_HOST_WRITABLE		BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE		BIT_ULL(58)

static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK

/*
 * Due to limited space in PTEs, the MMIO generation is a 19-bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		10

#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		62

#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)
static_assert(!(SPTE_MMU_PRESENT_MASK &
		(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)

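/*
 * Illustrative inverse of get_mmio_spte_generation() below, i.e. scattering
 * a 19-bit generation into the low and high SPTE ranges; the real packing
 * happens in make_mmio_spte() in spte.c.  Sketch only.
 */
static inline u64 __example_mmio_spte_gen_bits(u64 gen)
{
	WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

	return ((gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK) |
	       ((gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK);
}
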
extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_me_mask;

/*
 * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5

/*
 * If a thread running without exclusive control of the MMU lock must perform a
 * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
 * non-present intermediate value. Other threads which encounter this value
 * should not modify the SPTE.
 *
 * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
 * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create an
 * L1TF vulnerability.  Use only low bits to avoid 64-bit immediates.
 *
 * Only used by the TDP MMU.
 */
#define REMOVED_SPTE	0x5a0ULL

/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));

static inline bool is_removed_spte(u64 spte)
{
	return spte == REMOVED_SPTE;
}

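/*
 * Sketch of the first step of the protocol described above (illustrative,
 * not the TDP MMU's actual helper): atomically install REMOVED_SPTE so
 * that concurrent lockless walkers back off; the thread then completes the
 * multi-part update and writes the final value.  Loosely modeled on the
 * cmpxchg64()-based update in tdp_mmu.c.
 */
static inline bool __example_start_spte_removal(u64 *sptep, u64 old_spte)
{
	/* Fails if another thread modified, or is removing, the SPTE. */
	return cmpxchg64(sptep, old_spte, REMOVED_SPTE) == old_spte;
}
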
/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

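/*
 * Sketch of recovering the GFN from such a split SPTE, mirroring
 * get_mmio_spte_gfn() in mmu.c: the high GFN bits were shifted left by
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN and must be shifted back down before
 * being recombined with the low bits.
 */
static inline gfn_t __example_nonpresent_spte_to_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) &
	       shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}
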
/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       likely(shadow_mmio_value);
}

static inline bool is_shadow_present_pte(u64 pte)
{
	return !!(pte & SPTE_MMU_PRESENT_MASK);
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	/*
	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is '0',
	 * and non-TDP SPTEs will never set these bits.  Optimize for 64-bit
	 * TDP and do the A/D type check unconditionally.
	 */
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK;
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

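/*
 * Sketch of how an SPTE reaches the access-tracked state tested above,
 * loosely following mark_spte_for_access_track() in spte.c: save the R/X
 * bits in the high ignored bits, then clear them so the next access
 * faults.  Illustrative only; use the real helper declared at the bottom
 * of this header.
 */
static inline u64 __example_mark_access_track(u64 spte)
{
	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
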
static inline bool is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}

static inline bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static inline kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static inline bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static inline bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
				int level)
{
	int bit7 = (pte >> 7) & 1;

	return rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	return pte & get_rsvd_bits(rsvd_check, pte, level);
}

/*
 * Use the low 6 bits of the PTE (the EPT RWX and memtype bits) to index
 * the 64-entry bad_mt_xwr bitmap of illegal memtype/XWR combinations.
 */
static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
				   u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
					 u64 spte, int level)
{
	return __is_bad_mt_xwr(rsvd_check, spte) ||
	       __is_rsvd_bits_set(rsvd_check, spte, level);
}

static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
	if (spte & shadow_mmu_writable_mask) {
		WARN_ON_ONCE(!(spte & shadow_host_writable_mask));
		return true;
	}

	WARN_ON_ONCE(spte & PT_WRITABLE_MASK);
	return false;
}

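/*
 * Sketch of the lockless fast path for write faults (see fast_page_fault()
 * in mmu.c): if the check above passes, the vCPU can set PT_WRITABLE_MASK
 * without taking mmu_lock.  Illustrative only; the real path also records
 * the write in the dirty log and retries if the SPTE changed underfoot.
 */
static inline u64 __example_fast_make_writable(u64 spte)
{
	if (spte_can_locklessly_be_made_writable(spte))
		spte |= PT_WRITABLE_MASK;

	return spte;
}
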
static inline u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void kvm_mmu_reset_all_pte_masks(void);

#endif