// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>

u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}
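
/*
 * Descriptive note: construct an SPTE that caches an emulated MMIO access.
 * The SPTE packs together the MMIO "magic" value, the (masked) access
 * permissions, the faulting GPA and the current memslot generation, so a
 * later fault on the same GPA can be recognized as cached MMIO and validated
 * against the current generation.  If the L1TF mitigation is in use, the GPA
 * bits covered by shadow_nonpresent_or_rsvd_mask are additionally mirrored
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN bits higher, so the original GFN
 * survives the reserved-bit poisoning applied below.
 */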
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return mask;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping them with UC
			 * memory type, which would hurt the performance.
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}
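
/*
 * Descriptive note: build a leaf SPTE for @gfn/@pfn with the protections
 * implied by @pte_access.  The resulting SPTE is returned via @new_spte;
 * the return value is a mask of SET_SPTE_* flags, e.g.
 * SET_SPTE_WRITE_PROTECTED_PT when write access had to be removed because
 * the gfn must remain write-protected.
 */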
int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte)
{
	u64 spte = 0;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_AD_WRPROT_ONLY_MASK;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
							 kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	*new_spte = spte;
	return ret;
}

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte;

	spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~SPTE_HOST_WRITEABLE;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' thus they are not reserved bits.  Therefore KVM needs to look at
	 * the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE.  So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	if (mmio_value)
		shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	else
		shadow_mmio_value = 0;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
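
/*
 * Descriptive note: zero the shadow PTE masks and recompute shadow_phys_bits
 * and the L1TF reserved-bit masks.  Illustrative example (values assumed,
 * not defined in this file): with boot_cpu_data.x86_cache_bits == 44 and a
 * 5-bit SHADOW_NONPRESENT_OR_RSVD_MASK_LEN, low_phys_bits becomes 39, so
 * shadow_nonpresent_or_rsvd_mask covers physical address bits 39..43 and
 * shadow_nonpresent_or_rsvd_lower_gfn_mask covers bits PAGE_SHIFT..38,
 * i.e. the GPA bits that are left in place rather than relocated.
 */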
void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}