// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>

u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
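	/*
	 * Note: the generation does not fit in a contiguous span of
	 * software-available SPTE bits, so it is stored as two halves
	 * that are shifted into disjoint low and high bit ranges.
	 */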
	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return mask;
}

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not intended for MMIO, and can
			 * be mapped with cached memory type for better
			 * performance.  However, the above check
			 * misidentifies those pages as MMIO, and results in
			 * KVM mapping them with UC memory type, which would
			 * hurt performance.  Therefore, additionally check
			 * the host memory type and only treat UC/UC-/WC
			 * pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
	      gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
	      bool can_unsync, bool host_writable, bool ad_disabled,
	      u64 *new_spte)
{
	u64 spte = 0;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_AD_WRPROT_ONLY_MASK;
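	/*
	 * SPTE_AD_WRPROT_ONLY_MASK leaves the hardware A/D bits enabled,
	 * but flags the SPTE so that dirty logging is performed via write
	 * protection rather than dirty bits, e.g. because PML would record
	 * L2 GPAs when shadowing nested EPT.
	 */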
	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
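		/*
		 * PT_WRITABLE_MASK is the bit consumed by hardware, whereas
		 * SPTE_MMU_WRITEABLE (paired with SPTE_HOST_WRITEABLE set
		 * above) tracks whether write access can later be restored
		 * locklessly by the fast page fault path.
		 */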
		/*
		 * Optimization: for pte sync, if the spte was writable the
		 * hash lookup is unnecessary (and expensive).  Write
		 * protection is the responsibility of mmu_get_page /
		 * kvm_sync_page.  The same reasoning applies to dirty page
		 * accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	*new_spte = spte;
	return ret;
}

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte;

	spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~SPTE_HOST_WRITEABLE;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
	 * detected in the CPU detection code, but the processor treats
	 * those reduced bits as 'keyID', so they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits
	 * reported by CPUID.
	 */
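	/*
	 * CPUID.0x80000008:EAX[7:0] reports the architectural MAXPHYADDR,
	 * which is not reduced by the keyID bits described above.
	 */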
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
	 * with custom CPUID.  Proceed with whatever the kernel found since
	 * these features aren't virtualizable (SME/SEV also require CPUIDs
	 * higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write
	 * access from the PTE.  So, verify that we will be able to restore
	 * the write access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
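/*
 * Callers of kvm_mmu_set_mmio_spte_mask() pick a value that guarantees a
 * recognizable fault on MMIO accesses, e.g. VMX can use
 * VMX_EPT_MISCONFIG_WX_VALUE so that such accesses take an EPT
 * misconfiguration exit, while SVM's choice must account for the C-bit
 * on SEV-capable hardware.
 */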
/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of the legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
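/*
 * Worked example for the L1TF mask computation above, with hypothetical
 * values: on an affected CPU with x86_cache_bits == 46 and
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN == 5, low_phys_bits becomes 41, so
 * shadow_nonpresent_or_rsvd_mask covers PA bits 45:41 and
 * shadow_nonpresent_or_rsvd_lower_gfn_mask covers bits 40:12.
 */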