// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */


#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}
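
/*
 * Decoding reverses the layout built above: the low GFN bits are stored in
 * place, while the GFN bits that overlap shadow_nonpresent_or_rsvd_mask are
 * forced to 1 and their original values are stashed
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN bits higher.  A rough sketch of the
 * decode (the real helper is expected to be get_mmio_spte_gfn() in spte.h):
 *
 *	gpa  = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 *	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) &
 *	       shadow_nonpresent_or_rsvd_mask;
 *	gfn  = gpa >> PAGE_SHIFT;
 */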

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misclassifies those pages
			 * as MMIO, and results in KVM mapping them with the
			 * UC memory type, which would hurt performance.
			 * Therefore, we check the host memory type in
			 * addition and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * Bits 62:52 of PAE SPTEs are reserved.  WARN if said bits are set
	 * if PAE paging may be employed (shadow paging or any 32-bit KVM).
	 */
	WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
		     (spte & SPTE_TDP_AD_MASK));

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
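
	/*
	 * Note that exactly one of the two ORs above is a no-op, as
	 * shadow_x_mask and shadow_nx_mask are mutually exclusive.  E.g. with
	 * the EPT masks (kvm_mmu_set_ept_masks()) an executable mapping sets
	 * VMX_EPT_EXECUTABLE_MASK and shadow_nx_mask is 0, whereas with the
	 * legacy/NPT masks (kvm_mmu_reset_all_pte_masks()) execute is the
	 * default, shadow_x_mask is 0 and only NX is ever ORed in.
	 */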

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive).  Write protection
		 * is the responsibility of mmu_get_page / kvm_sync_page.
		 * The same reasoning applies to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	WARN_ON(is_mmio_spte(spte));
	*new_spte = spte;
	return ret;
}

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
	 * detected in the CPU detection code, but the processor treats those
	 * reduced bits as 'keyID' bits, so they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits reported
	 * by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
	 * with custom CPUID.  Proceed with whatever the kernel found since
	 * these features aren't virtualizable (SME/SEV also require CPUID
	 * leaves higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write
	 * access from the PTE.  So, verify that we will be able to restore
	 * the write access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
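
/*
 * For reference, restoring an access-tracked SPTE is roughly the inverse of
 * mark_spte_for_access_track().  A sketch (the actual restore is expected to
 * live with the fast page fault code in mmu.c):
 *
 *	saved = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) &
 *		SHADOW_ACC_TRACK_SAVED_BITS_MASK;
 *	spte &= ~shadow_acc_track_mask;
 *	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
 *		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
 *	spte |= saved;
 *
 * which is why the second WARN above requires the saved-bit locations to be
 * zero before the bits selected by SHADOW_ACC_TRACK_SAVED_BITS_MASK are
 * copied into them.
 */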

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself and a removed SPTE
	 * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
	 * never collide as MMIO must set some RWX bits, and removed SPTEs must
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask = VMX_EPT_READABLE_MASK;
	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask = 0ull;
	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
	shadow_me_mask = 0ull;

	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask = PT_USER_MASK;
	shadow_accessed_mask = PT_ACCESSED_MASK;
	shadow_dirty_mask = PT_DIRTY_MASK;
	shadow_nx_mask = PT64_NX_MASK;
	shadow_x_mask = 0;
	shadow_present_mask = PT_PRESENT_MASK;
	shadow_acc_track_mask = 0;
	shadow_me_mask = sme_me_mask;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
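
/*
 * Worked example of the L1TF masks computed above, with illustrative numbers
 * only: on an affected CPU with boot_cpu_data.x86_cache_bits == 44 and,
 * assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN == 5, low_phys_bits ends up as
 * 39, so shadow_nonpresent_or_rsvd_mask covers PA bits 43:39 and
 * shadow_nonpresent_or_rsvd_lower_gfn_mask covers bits 38:12.  An MMIO SPTE
 * then always has bits 43:39 set, pointing any speculative L1TF load at a
 * physical address that should hold no cached data, while make_mmio_spte()
 * stashes the original values of those GFN bits five positions higher (bits
 * 48:44) so the GFN can still be recovered.
 */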