// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */


#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

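/*
 * Compute the value of an MMIO SPTE.  The GFN, the allowed access bits, and
 * the current memslot generation are all packed into the SPTE so that the
 * MMIO emulation path can recover them directly from the faulting SPTE, and
 * so that a cached MMIO SPTE is recognized as stale once the memslot
 * generation changes.
 */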
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not for MMIO, and can be mapped
                         * with cached memory type for better performance.
                         * However, the above check misclassifies those pages
                         * as MMIO and results in KVM mapping them with UC
                         * memory type, which would hurt performance.
                         * Therefore, also check the host memory type and
                         * only treat UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

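/*
 * Construct a leaf SPTE mapping @gfn to @pfn with the given access
 * permissions.  The computed value is returned via @new_spte; it is the
 * caller's job to actually install it.  The return value is true if the
 * mapping had to be write-protected, e.g. because the GFN maps a shadowed
 * page table that could not be unsynced.
 */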
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               struct kvm_memory_slot *slot,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool can_unsync,
               bool host_writable, u64 *new_spte)
{
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;

        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
         * supports exec-only page table entries.  In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access.  See FNAME(gpte_access) in paging_tmpl.h.
         */
        spte |= shadow_present_mask;
        if (!prefetch)
                spte |= spte_shadow_accessed_mask(spte);

        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled()) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
                                                         kvm_is_mmio_pfn(pfn));

        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (!kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_mask;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

                /*
                 * Optimization: for pte sync, if spte was writable the hash
                 * lookup is unnecessary (and expensive).  Write protection
                 * is the responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
                 * The same reasoning applies to dirty page accounting.
                 */
                if (is_writable_pte(old_spte))
                        goto out;

                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
                 * e.g. it's write-tracked (upper-level SPs) or has one or more
                 * shadow pages and unsync'ing pages is not allowed.
                 */
                if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        wrprot = true;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

out:
        if (prefetch)
                spte = mark_spte_for_access_track(spte);

        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
                /* Enforced by kvm_mmu_hugepage_adjust. */
                WARN_ON(level > PG_LEVEL_4K);
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }

        *new_spte = spte;
        return wrprot;
}

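/*
 * Non-leaf SPTEs only point the hardware page walk at the next lower level
 * of the paging structure, so they are built with full permissions; access
 * restrictions are enforced by the leaf SPTEs.
 */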
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_mask;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
        u64 new_spte;

        new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
        new_spte |= (u64)new_pfn << PAGE_SHIFT;

        new_spte &= ~PT_WRITABLE_MASK;
        new_spte &= ~shadow_host_writable_mask;

        new_spte = mark_spte_for_access_track(new_spte);

        return new_spte;
}

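/*
 * Determine the effective number of physical address bits, preferring the
 * value reported by CPUID over boot_cpu_data.  The result is cached in
 * shadow_phys_bits and later used to decide whether a reserved physical
 * address bit is available for trapping MMIO accesses (see
 * kvm_mmu_reset_all_pte_masks()).
 */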
static u8 kvm_get_shadow_phys_bits(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is detected
         * in CPU detection code, but the processor treats those reduced bits
         * as 'keyID', thus they are not reserved bits.  Therefore KVM needs to
         * look at the physical address bits reported by CPUID.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
         * custom CPUID.  Proceed with whatever the kernel found since these
         * features aren't virtualizable (SME/SEV also require CPUIDs higher
         * than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        /*
         * Making an Access Tracking PTE will result in removal of write access
         * from the PTE.  So, verify that we will be able to restore the write
         * access in the fast page fault path later on.
         */
        WARN_ONCE((spte & PT_WRITABLE_MASK) &&
                  !spte_can_locklessly_be_made_writable(spte),
                  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "kvm: Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}

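/*
 * Configure MMIO SPTE recognition: @mmio_value is the value stamped into
 * MMIO SPTEs, @mmio_mask is the set of bits that must match for an SPTE to
 * be treated as MMIO, and @access_mask is the set of access bits that may
 * be cached in an MMIO SPTE.  MMIO caching is disabled (mmio_value forced
 * to 0) if the requested value could collide with other SPTE encodings.
 */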
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled.  This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a removed SPTE
         * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
         * never collide as MMIO must set some RWX bits, and removed SPTEs must
         * not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        shadow_user_mask        = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask    = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask       = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
        shadow_nx_mask          = 0ull;
        shadow_x_mask           = VMX_EPT_EXECUTABLE_MASK;
        shadow_present_mask     = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
        shadow_acc_track_mask   = VMX_EPT_RWX_MASK;
        shadow_me_mask          = 0ull;

        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

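/*
 * Reset all SPTE masks to the defaults used for legacy (non-EPT) paging;
 * VMX overrides these via kvm_mmu_set_ept_masks() when EPT is in use.  This
 * also computes shadow_nonpresent_or_rsvd_mask, which is applied to
 * non-present and MMIO SPTEs to guard against L1TF.
 */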
void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        shadow_phys_bits = kvm_get_shadow_phys_bits();

        /*
         * If the CPU has 46 or fewer physical address bits, then set an
         * appropriate mask to guard against L1TF attacks.  Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID.  Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask        = PT_USER_MASK;
        shadow_accessed_mask    = PT_ACCESSED_MASK;
        shadow_dirty_mask       = PT_DIRTY_MASK;
        shadow_nx_mask          = PT64_NX_MASK;
        shadow_x_mask           = 0;
        shadow_present_mask     = PT_PRESENT_MASK;
        shadow_acc_track_mask   = 0;
        shadow_me_mask          = sme_me_mask;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
        shadow_mmu_writable_mask  = DEFAULT_SPTE_MMU_WRITEABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
         * 52-bit physical addresses then there are no reserved PA bits in the
         * PTEs and so the reserved PA approach must be disabled.
         */
        if (shadow_phys_bits < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}