// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

void __init kvm_mmu_spte_module_init(void)
{
        /*
         * Snapshot userspace's desire to allow MMIO caching.  Whether or not
         * KVM can actually enable MMIO caching depends on vendor-specific
         * hardware capabilities and other module params that can't be resolved
         * until the vendor module is loaded, i.e. enable_mmio_caching can and
         * will change when the vendor module is (re)loaded.
         */
        allow_mmio_caching = enable_mmio_caching;
}
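/*
 * Illustrative note (bit positions assumed from the layout defined in
 * spte.h): the memslot generation doesn't fit in one contiguous run of spare
 * SPTE bits, so it is split into a low chunk and a high chunk.  E.g. with an
 * 8-bit low chunk, gen = 0x1ff would land bits 7:0 in the low chunk and
 * bit 8 in the high chunk.  make_mmio_spte() ORs the resulting mask into the
 * final MMIO SPTE, and the generation is compared on MMIO faults to detect
 * SPTEs created against a stale memslot generation.
 */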
static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not intended for MMIO, and can
                         * be mapped with cached memory type for better
                         * performance.  However, the above check wrongly
                         * classifies those pages as MMIO, and results in
                         * KVM mapping them with the UC memory type, which
                         * hurts performance.  Therefore, also check the
                         * host memory type and only treat UC/UC-/WC pages
                         * as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
        /*
         * Always atomically update an SPTE that can be modified outside of
         * mmu_lock: this ensures the Dirty bit is not lost, and yields a
         * stable is_writable_pte() so that a required TLB flush is not
         * missed.
         */
        if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
                return true;

        if (is_access_track_spte(spte))
                return true;

        if (spte_ad_enabled(spte)) {
                if (!(spte & shadow_accessed_mask) ||
                    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
                        return true;
        }

        return false;
}
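/*
 * Construct a leaf SPTE mapping @gfn/@pfn with the requested access rights,
 * storing the result in @new_spte.  Returns true if the new SPTE had to be
 * write-protected, i.e. if ACC_WRITE_MASK was requested but couldn't be
 * granted because the shadow pages reachable through the SPTE couldn't be
 * unsync'd.  (Descriptive summary added for clarity; see the body below for
 * the authoritative behavior.)
 */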
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               const struct kvm_memory_slot *slot,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool can_unsync,
               bool host_writable, u64 *new_spte)
{
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;

        WARN_ON_ONCE(!pte_access && !shadow_present_mask);

        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_mmu_page_ad_need_write_protect(sp))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
         * supports exec-only page table entries.  In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access.  See FNAME(gpte_access) in paging_tmpl.h.
         */
        spte |= shadow_present_mask;
        if (!prefetch)
                spte |= spte_shadow_accessed_mask(spte);

        /*
         * For simplicity, enforce the NX huge page mitigation even if not
         * strictly necessary.  KVM could ignore the mitigation if paging is
         * disabled in the guest, as the guest doesn't have any page tables to
         * abuse.  But to safely ignore the mitigation, KVM would have to
         * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
         * is toggled on, and that's a net negative for performance when TDP is
         * enabled.  When TDP is disabled, KVM will always switch to a new MMU
         * when CR0.PG is toggled, but leveraging that to ignore the mitigation
         * would tie make_spte() further to vCPU/MMU state, and add complexity
         * just to optimize a mode that is anything but performance critical.
         */
        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled(vcpu->kvm)) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;

        if (shadow_memtype_mask)
                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
                                                         kvm_is_mmio_pfn(pfn));
        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_value;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

                /*
                 * Optimization: for pte sync, if the SPTE was writable the
                 * hash lookup is unnecessary (and expensive).  Write
                 * protection is the responsibility of kvm_mmu_get_page /
                 * kvm_mmu_sync_roots.  The same reasoning applies to dirty
                 * page accounting.
                 */
                if (is_writable_pte(old_spte))
                        goto out;

                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
                 * e.g. it's write-tracked (upper-level SPs) or has one or more
                 * shadow pages and unsync'ing pages is not allowed.
                 */
                if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        wrprot = true;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

out:
        if (prefetch)
                spte = mark_spte_for_access_track(spte);

        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
                /* Enforced by kvm_mmu_hugepage_adjust. */
                WARN_ON(level > PG_LEVEL_4K);
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }

        *new_spte = spte;
        return wrprot;
}
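/*
 * Clear NX and set X on the given SPTE.  If the SPTE is access-tracked, the
 * saved protection bits are restored first and the SPTE is re-marked for
 * access tracking afterwards, so the saved-bits stash stays consistent.
 * (Descriptive summary added for clarity.)
 */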
static u64 make_spte_executable(u64 spte)
{
        bool is_access_track = is_access_track_spte(spte);

        if (is_access_track)
                spte = restore_acc_track_spte(spte);

        spte &= ~shadow_nx_mask;
        spte |= shadow_x_mask;

        if (is_access_track)
                spte = mark_spte_for_access_track(spte);

        return spte;
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up
 * the new page table.  For example, when splitting a 2MiB page into 4KiB
 * pages, index N yields the child SPTE whose sub-page starts N * 4KiB into
 * the huge page.
 */
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role,
                              int index)
{
        u64 child_spte;

        if (WARN_ON_ONCE(!is_shadow_present_pte(huge_spte)))
                return 0;

        if (WARN_ON_ONCE(!is_large_pte(huge_spte)))
                return 0;

        child_spte = huge_spte;

        /*
         * The child_spte already has the base address of the huge page being
         * split.  So we just have to OR in the offset to the page at the next
         * lower level for the given index.
         */
        child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

        if (role.level == PG_LEVEL_4K) {
                child_spte &= ~PT_PAGE_SIZE_MASK;

                /*
                 * When splitting to a 4K page where execution is allowed, mark
                 * the page executable as the NX hugepage mitigation no longer
                 * applies.
                 */
                if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
                        child_spte = make_spte_executable(child_spte);
        }

        return child_spte;
}

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_value;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
        u64 new_spte;

        new_spte = old_spte & ~SPTE_BASE_ADDR_MASK;
        new_spte |= (u64)new_pfn << PAGE_SHIFT;

        new_spte &= ~PT_WRITABLE_MASK;
        new_spte &= ~shadow_host_writable_mask;
        new_spte &= ~shadow_mmu_writable_mask;

        new_spte = mark_spte_for_access_track(new_spte);

        return new_spte;
}

u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        check_spte_writable_invariants(spte);

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "kvm: Access Tracking saved bit locations are not zero\n");

        /*
         * Stash the protection bits in the otherwise-unused "saved bits"
         * area so they can be restored on the next access, then strip the
         * bits covered by shadow_acc_track_mask so that the next access
         * faults and KVM can mark the page as accessed.
         */
        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}
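/*
 * Configure the value/mask pair used to create and recognize MMIO SPTEs, and
 * the access bits cached in them.  As an illustration, with the EPT setup
 * from kvm_mmu_set_ept_masks() below, mmio_value is the write+execute
 * misconfiguration (which the CPU faults on) and mmio_mask covers the RWX
 * bits, so any SPTE whose RWX bits equal WX is recognized as an MMIO SPTE.
 * Each sanity check below falls back to disabling MMIO caching
 * (mmio_value = 0) rather than risk misidentifying SPTEs.
 */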
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        /*
         * Reset to the original module param value to honor userspace's desire
         * to (dis)allow MMIO caching.  Update the param itself so that
         * userspace can see whether or not KVM is actually using MMIO caching.
         */
        enable_mmio_caching = allow_mmio_caching;
        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * The mask must contain only bits that are carved out specifically for
         * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
         * generation.
         */
        if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled.  This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a removed SPTE
         * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
         * never collide as MMIO must set some RWX bits, and removed SPTEs must
         * not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        if (!mmio_value)
                enable_mmio_caching = false;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
        /* shadow_me_value must be a subset of shadow_me_mask */
        if (WARN_ON(me_value & ~me_mask))
                me_value = me_mask = 0;

        shadow_me_value = me_value;
        shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
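/*
 * Hypothetical example of a vendor-module caller (names assumed here purely
 * for illustration): a platform with a memory-encryption bit would pass that
 * bit as both the value to set in SPTEs and the mask of encryption-related
 * bits, e.g.
 *
 *      kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
 *
 * The actual caller and value are vendor-specific.
 */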
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        shadow_user_mask = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
        shadow_nx_mask = 0ull;
        shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
        shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
        /*
         * EPT overrides the host MTRRs, and so KVM must program the desired
         * memtype directly into the SPTEs.  Note, this mask is just the mask
         * of all bits that factor into the memtype, the actual memtype must be
         * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
         */
        shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
        shadow_acc_track_mask = VMX_EPT_RWX_MASK;
        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
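/*
 * Worked example for the L1TF mask computed below (illustrative; assumes
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN is 5, per its definition in spte.h):
 * on a vulnerable CPU whose L1 cache uses 46 PA bits,
 * low_phys_bits = 46 - 5 = 41, so shadow_nonpresent_or_rsvd_mask covers PA
 * bits 45:41.  Those bits are always set in MMIO SPTEs, and any GFN bits
 * that overlap them are relocated five bits higher (see make_mmio_spte()),
 * so the physical address encoded in a non-present SPTE falls outside the
 * RAM that L1TF could leak.
 */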
void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        shadow_phys_bits = kvm_get_shadow_phys_bits();

        /*
         * If the CPU has 46 or less physical address bits, then set an
         * appropriate mask to guard against L1TF attacks.  Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID.  Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask = PT_USER_MASK;
        shadow_accessed_mask = PT_ACCESSED_MASK;
        shadow_dirty_mask = PT_DIRTY_MASK;
        shadow_nx_mask = PT64_NX_MASK;
        shadow_x_mask = 0;
        shadow_present_mask = PT_PRESENT_MASK;

        /*
         * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
         * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
         * correct memtype (WB is the "weakest" memtype).
         */
        shadow_memtype_mask = 0;
        shadow_acc_track_mask = 0;
        shadow_me_mask = 0;
        shadow_me_value = 0;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
         * 52-bit physical addresses then there are no reserved PA bits in the
         * PTEs and so the reserved PA approach must be disabled.
         */
        if (shadow_phys_bits < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}