// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
        BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_TDP_AD_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}
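
/*
 * Illustrative sketch of the value make_mmio_spte() builds, assuming the L1TF
 * mitigation is active and, purely for this example, that
 * shadow_nonpresent_or_rsvd_mask covers physical address bits 45:41 (i.e.
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN taken as 5):
 *
 *   spte = <generation>                    split across the low/high GEN fields
 *        | shadow_mmio_value | access      access pre-masked by
 *                                          shadow_mmio_access_mask
 *        | gpa | shadow_nonpresent_or_rsvd_mask
 *                                          GPA bits 45:41 are clobbered by the
 *                                          always-set reserved bits ...
 *        | (gpa & mask) << 5               ... so those GPA bits are re-stashed
 *                                          at bits 50:46 and the original GFN
 *                                          can still be recovered when the
 *                                          MMIO SPTE is decoded
 *
 * The real bit positions depend on the host CPU; see
 * kvm_mmu_reset_all_pte_masks() for how the mask is computed.
 */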

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not for MMIO, and can be mapped
                         * with cached memory type for better performance.
                         * However, the check above would mistakenly treat
                         * those pages as MMIO, and KVM would then map them
                         * with the UC memory type, which hurts performance.
                         * Therefore, additionally check the host memory type
                         * and only treat UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
              gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
              bool can_unsync, bool host_writable, bool ad_disabled,
              u64 *new_spte)
{
        u64 spte = 0;
        int ret = 0;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

        /*
         * Bits 62:52 of PAE SPTEs are reserved.  WARN if said bits are set
         * when PAE paging may be employed (shadow paging or any 32-bit KVM).
         */
        WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
                     (spte & SPTE_TDP_AD_MASK));

        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
         * supports exec-only page table entries.  In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access.  See FNAME(gpte_access) in paging_tmpl.h.
         */
        spte |= shadow_present_mask;
        if (!speculative)
                spte |= spte_shadow_accessed_mask(spte);

        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled()) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));

        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (!kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_mask;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

                /*
                 * Optimization: for pte sync, if the spte was writable the
                 * hash lookup is unnecessary (and expensive).  Write
                 * protection is the responsibility of mmu_get_page /
                 * kvm_sync_page.  The same reasoning applies to dirty page
                 * accounting.
                 */
                if (!can_unsync && is_writable_pte(old_spte))
                        goto out;

                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        ret |= SET_SPTE_WRITE_PROTECTED_PT;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

        if (speculative)
                spte = mark_spte_for_access_track(spte);

out:
        WARN_ON(is_mmio_spte(spte));
        *new_spte = spte;
        return ret;
}
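
/*
 * Caller-side sketch (illustrative, not taken from this file): make_spte()
 * never installs the SPTE itself, it only computes the value and reports via
 * the return code whether the gfn had to be write-protected.  A caller would
 * do roughly the following, where mmu_spte_update() stands in for whatever
 * store routine the caller actually uses:
 *
 *	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep,
 *			speculative, can_unsync, host_writable,
 *			ad_disabled, &spte);
 *	if (ret & SET_SPTE_WRITE_PROTECTED_PT)
 *		<flush stale, writable TLB entries for the gfn>;
 *	mmu_spte_update(sptep, spte);	// actually install the new value
 */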

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte;

        spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
               shadow_user_mask | shadow_x_mask | shadow_me_mask;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
        u64 new_spte;

        new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
        new_spte |= (u64)new_pfn << PAGE_SHIFT;

        new_spte &= ~PT_WRITABLE_MASK;
        new_spte &= ~SPTE_HOST_WRITEABLE;

        new_spte = mark_spte_for_access_track(new_spte);

        return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
         * detected in the CPU detection code, but the processor treats those
         * reduced bits as 'keyID', so they are not reserved bits.  Therefore
         * KVM needs to look at the physical address bits reported by CPUID.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
         * with custom CPUID.  Proceed with whatever the kernel found since
         * these features aren't virtualizable (SME/SEV also require CPUIDs
         * higher than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        /*
         * Making an Access Tracking PTE will result in removal of write
         * access from the PTE.  So, verify that we will be able to restore
         * the write access in the fast page fault path later on.
         */
        WARN_ONCE((spte & PT_WRITABLE_MASK) &&
                  !spte_can_locklessly_be_made_writable(spte),
                  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "kvm: Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}
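
/*
 * Illustrative walk-through of the transformation above: for an A/D-disabled
 * SPTE, SHADOW_ACC_TRACK_SAVED_BITS_MASK covers the permission bits that must
 * survive access tracking (in practice the EPT readable and executable bits).
 * Those bits are copied SHADOW_ACC_TRACK_SAVED_BITS_SHIFT positions higher
 * into software-available bits, and shadow_acc_track_mask then clears the
 * originals, e.g. an SPTE with R=X=1 ends up with both bits cleared and a
 * copy of them parked SHIFT positions up.  The result no longer grants access
 * but still records what it used to grant, so the fast page fault path can
 * later shift the saved bits back down and restore access without taking
 * mmu_lock; that is why the WARN_ONCE above insists that a writable SPTE also
 * be locklessly restorable.
 */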

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled.  This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        WARN_ON((mmio_value & mmio_mask) != mmio_value);
        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                           u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
                           u64 acc_track_mask, u64 me_mask)
{
        BUG_ON(!dirty_mask != !accessed_mask);
        BUG_ON(!accessed_mask && !acc_track_mask);
        BUG_ON(acc_track_mask & SPTE_TDP_AD_MASK);

        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
        shadow_present_mask = p_mask;
        shadow_acc_track_mask = acc_track_mask;
        shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;

        shadow_user_mask = 0;
        shadow_accessed_mask = 0;
        shadow_dirty_mask = 0;
        shadow_nx_mask = 0;
        shadow_x_mask = 0;
        shadow_present_mask = 0;
        shadow_acc_track_mask = 0;

        shadow_phys_bits = kvm_get_shadow_phys_bits();

        /*
         * If the CPU has 46 or fewer physical address bits, then set an
         * appropriate mask to guard against L1TF attacks.  Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID.  Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of the legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
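
/*
 * Worked example of the L1TF setup above, assuming an affected CPU with
 * boot_cpu_data.x86_cache_bits == 46 and taking
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN as 5 (illustrative values only):
 *
 *   low_phys_bits                            = 46 - 5 = 41
 *   shadow_nonpresent_or_rsvd_mask           = rsvd_bits(41, 45)   -> bits 45:41
 *   shadow_nonpresent_or_rsvd_lower_gfn_mask = GENMASK_ULL(40, 12) -> bits 40:12
 *
 * Setting bits 45:41 in every non-present or MMIO SPTE pushes the physical
 * address a speculating guest could observe through such an entry into the
 * very top of the cacheable address range, which is assumed not to be backed
 * by system RAM, while the displaced GFN bits are stashed elsewhere in the
 * SPTE (see make_mmio_spte()).  On CPUs without the L1TF bug the mask stays
 * zero and low_phys_bits simply remains x86_phys_bits.
 */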