// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */


#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

/* Encode the memslot generation into the MMIO SPTE's generation bit fields. */
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

/*
 * Construct an MMIO SPTE that caches the GPA, the allowed access bits, and
 * the current memslot generation.
 */
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping them with UC
			 * memory type, which would hurt the performance.
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool speculative, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;
	bool wrprot = false;

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive).  Write protection
		 * is the responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
		 * The same reasoning applies to dirty page accounting.
		 */
		if (is_writable_pte(old_spte))
			goto out;

		/*
		 * Unsync shadow pages that are reachable by the new, writable
		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
		 * e.g. it's write-tracked (upper-level SPs) or has one or more
		 * shadow pages and unsync'ing pages is not allowed.
		 */
		if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			wrprot = true;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

out:
	if (speculative)
		spte = mark_spte_for_access_track(spte);

	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

	if (spte & PT_WRITABLE_MASK)
		kvm_vcpu_mark_page_dirty(vcpu, gfn);

	*new_spte = spte;
	return wrprot;
}

/* Construct a non-leaf SPTE that points at a lower-level page table. */
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

/*
 * Rebuild an SPTE for a new PFN when the primary MMU changes a mapping; the
 * new SPTE is created write-protected and marked for access tracking.
 */
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are
	 * detected in CPU detection code, but the processor treats those
	 * reduced bits as 'keyID', thus they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits reported
	 * by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
	 * with custom CPUID.  Proceed with whatever the kernel found since
	 * these features aren't virtualizable (SME/SEV also require CPUIDs
	 * higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE.  So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself and a removed SPTE
	 * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
	 * never collide as MMIO must set some RWX bits, and removed SPTEs must
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask = VMX_EPT_READABLE_MASK;
	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask = 0ull;
	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
	shadow_me_mask = 0ull;

	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask = PT_USER_MASK;
	shadow_accessed_mask = PT_ACCESSED_MASK;
	shadow_dirty_mask = PT_DIRTY_MASK;
	shadow_nx_mask = PT64_NX_MASK;
	shadow_x_mask = 0;
	shadow_present_mask = PT_PRESENT_MASK;
	shadow_acc_track_mask = 0;
	shadow_me_mask = sme_me_mask;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}