// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}
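
/*
 * Illustrative walk-through of make_mmio_spte() (a sketch, not part of the
 * build; the concrete values are assumptions, the real bit positions come
 * from the MMIO_SPTE_GEN_* and SHADOW_NONPRESENT_OR_RSVD_* definitions in
 * spte.h):
 *
 *	gen = 0x5;			// memslots generation, truncated
 *					// to MMIO_SPTE_GEN_MASK
 *	gfn = 0x1234;			// gpa = 0x1234000
 *
 *	spte  = generation_mmio_spte_mask(0x5);	// gen split across the low
 *						// and high gen bit fields
 *	spte |= shadow_mmio_value | access;	// tag the SPTE as MMIO and
 *						// record the allowed access
 *	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
 *	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
 *			<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
 *
 * The last two statements are the L1TF angle: GPA bits that overlap
 * shadow_nonpresent_or_rsvd_mask are forced to 1 in place, and their
 * original values are stashed SHADOW_NONPRESENT_OR_RSVD_MASK_LEN bits
 * higher, so the non-present SPTE never points at cacheable host RAM yet
 * the GFN can still be recovered when the access faults.
 */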
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not intended for MMIO and can be
			 * mapped with a cached memory type for better
			 * performance.  However, the above check
			 * misidentifies those pages as MMIO and results in
			 * KVM mapping them with the UC memory type, which
			 * would hurt performance.  Therefore, additionally
			 * check the host memory type and only treat
			 * UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte)
{
	u64 spte = 0;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * Bits 62:52 of PAE SPTEs are reserved.  WARN if said bits are set
	 * when PAE paging may be employed (shadow paging or any 32-bit KVM).
	 */
	WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
		     (spte & SPTE_TDP_AD_MASK));

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware supports
	 * exec-only page table entries.  In that case, ACC_USER_MASK and
	 * shadow_user_mask are used to represent read access.  See
	 * FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

		/*
		 * Optimization: for pte sync, if the spte was writable, the
		 * hash lookup is unnecessary (and expensive).  Write
		 * protection is the responsibility of mmu_get_page() /
		 * kvm_sync_page().  The same reasoning applies to dirty page
		 * accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	WARN_ON(is_mmio_spte(spte));
	*new_spte = spte;
	return ret;
}
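
/*
 * Example of the resulting bit composition (illustrative only, assuming a
 * non-speculative, host-writable, executable 4K user page backed by regular
 * RAM with A/D bits enabled): make_spte() builds up roughly
 *
 *	spte = shadow_present_mask | shadow_accessed_mask | shadow_x_mask |
 *	       shadow_user_mask | SPTE_HOST_WRITEABLE | shadow_me_mask |
 *	       ((u64)pfn << PAGE_SHIFT) | PT_WRITABLE_MASK |
 *	       SPTE_MMU_WRITEABLE | shadow_dirty_mask;
 *
 * plus the memtype bits from kvm_x86_get_mt_mask() when TDP is enabled.  If
 * mmu_need_write_protect() finds an unsync'able shadow page for the gfn,
 * the writable bits are stripped again and SET_SPTE_WRITE_PROTECTED_PT is
 * returned so the caller knows remote TLBs must be flushed.
 */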
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte;

	spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~SPTE_HOST_WRITEABLE;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
	 * detected in the CPU detection code, but the processor treats those
	 * reduced bits as 'keyID' bits, i.e. they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits reported
	 * by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
	 * with custom CPUID.  Proceed with whatever the kernel found since
	 * these features aren't virtualizable (SME/SEV also require CPUIDs
	 * higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write
	 * access from the PTE.  So, verify that we will be able to restore
	 * the write access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
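
/*
 * Illustration of the saved-bits shuffle above (a sketch; the exact mask and
 * shift values are defined in spte.h): with A/D bits disabled, the EPT R/X
 * permission bits are what make the SPTE present, so they are stashed in a
 * scratch area of the SPTE before being cleared:
 *
 *	saved = spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK;    // bits to save
 *	spte |= saved << SHADOW_ACC_TRACK_SAVED_BITS_SHIFT; // park copies
 *	spte &= ~shadow_acc_track_mask;                     // clear originals
 *
 * The next access to the page faults, which counts as the "accessed" event,
 * and the fault handler shifts the saved bits back down to make the SPTE
 * present again (see restore_acc_track_spte() in mmu.c).
 */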
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	WARN_ON((mmio_value & mmio_mask) != mmio_value);
	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_TDP_AD_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of the legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			      PT_DIRTY_MASK, PT64_NX_MASK, 0,
			      PT_PRESENT_MASK, 0, sme_me_mask);
}
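
/*
 * Worked example of the L1TF mask setup in kvm_mmu_reset_all_pte_masks()
 * (illustrative; the cache-PA width of 46 and the mask length of 5 are
 * assumed values for the sake of the arithmetic):
 *
 *	boot_cpu_data.x86_cache_bits        = 46
 *	SHADOW_NONPRESENT_OR_RSVD_MASK_LEN  = 5
 *
 *	low_phys_bits                       = 46 - 5 = 41
 *	shadow_nonpresent_or_rsvd_mask      = rsvd_bits(41, 45)   // bits 45:41
 *	shadow_nonpresent_or_rsvd_lower_gfn_mask
 *	                                    = GENMASK_ULL(40, 12) // bits 40:12
 *
 * Non-present and MMIO SPTEs then have bits 45:41 forced to 1, so any
 * "address" a speculating CPU could leak through L1TF lies above cacheable
 * RAM, while GFN bits that collide with the mask are parked five bits
 * higher in the SPTE (see make_mmio_spte() above).
 */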