// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */


#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

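/*
 * Pack the given memslots generation into the SPTE bits reserved for it.
 * The generation is split across two non-contiguous bit ranges, hence the
 * separate low/high shifts and masks.
 */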
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

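/*
 * Construct an MMIO SPTE, encoding the GPA, the allowed access bits, and
 * the current memslots generation.  The generation lets KVM detect and
 * zap MMIO SPTEs that were created before the last memslot update.
 */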
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}

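/*
 * Returns true if the pfn should be treated as MMIO, i.e. it is neither
 * ordinary RAM nor a reserved page that can safely be mapped with a
 * cached memory type (see the NVDIMM DAX comment below).
 */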
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO and can be mapped
			 * with a cached memory type for better performance.
			 * The PageReserved() check alone would mistake those
			 * pages for MMIO and cause KVM to map them with the
			 * UC memory type, which would hurt performance.
			 * Therefore, also check the host memory type and
			 * treat only UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

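/*
 * Compute a leaf SPTE for the given pfn/level/permissions and return it
 * via @new_spte.  The return value is a bitmask of SET_SPTE_* flags,
 * e.g. SET_SPTE_WRITE_PROTECTED_PT when the gfn had to be write-protected
 * to keep its shadow pages synchronized.
 */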
int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
		     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
		     bool can_unsync, bool host_writable, bool ad_disabled,
		     u64 *new_spte)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;
	int ret = 0;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

	/*
	 * Bits 62:52 of PAE SPTEs are reserved.  WARN if those bits are set
	 * when PAE paging may be in use (shadow paging or any 32-bit KVM).
	 */
	WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
		     (spte & SPTE_TDP_AD_MASK));

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!speculative)
		spte |= spte_shadow_accessed_mask(spte);

	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if the spte was writable the
		 * hash lookup is unnecessary (and expensive).  Write
		 * protection is the responsibility of mmu_get_page /
		 * kvm_sync_page.  The same reasoning applies to dirty page
		 * accounting.
		 */
		if (!can_unsync && is_writable_pte(old_spte))
			goto out;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

	if (speculative)
		spte = mark_spte_for_access_track(spte);

out:
	WARN_ON(is_mmio_spte(spte));
	*new_spte = spte;
	return ret;
}

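/*
 * Compute a non-leaf SPTE that points at @child_pt.  Non-leaf SPTEs are
 * created with full permissions; access is constrained by the leaf SPTEs.
 */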
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_mask;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED_MASK;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

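/*
 * Build the replacement SPTE used when the primary MMU changes a PTE via
 * the mmu_notifier: rehome the SPTE to @new_pfn, strip writability, and
 * clear the Accessed state so the next guest access is noticed.
 */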
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
	 * detected in the CPU detection code, but the processor treats those
	 * reduced bits as 'keyID', thus they are not reserved bits.
	 * Therefore KVM needs to look at the physical address bits reported
	 * by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

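/*
 * Turn the SPTE into an access-tracked SPTE.  With A/D bits this simply
 * clears the Accessed bit; without A/D bits, the R/X permissions are
 * saved in the SPTE's high "saved bits" range and the acc-track bits are
 * cleared in place so that the next access faults.
 */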
u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an access-tracking PTE removes write access from the PTE,
	 * so verify that the write access can be restored later on in the
	 * fast page fault path.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

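/*
 * Set the value/mask pair used to tag MMIO SPTEs.  A mmio_value of zero
 * disables MMIO SPTE caching; that also happens when the module parameter
 * turns caching off or the value collides with the L1TF mitigation bits.
 */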
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	WARN_ON((mmio_value & mmio_mask) != mmio_value);
	shadow_mmio_value = mmio_value;
	shadow_mmio_mask  = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

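/*
 * Configure the SPTE masks for EPT.  Accessed/Dirty bits and execute-only
 * translations are optional EPT features, hence the capability flags.
 * MMIO SPTEs are tagged with the WX-only pattern, which the CPU reports
 * as an EPT misconfiguration.
 */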
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask	= VMX_EPT_READABLE_MASK;
	shadow_accessed_mask	= has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask	= has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask		= 0ull;
	shadow_x_mask		= VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask	= has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
	shadow_acc_track_mask	= VMX_EPT_RWX_MASK;
	shadow_me_mask		= 0ull;

	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

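/*
 * (Re)initialize the SPTE masks for legacy x86 paging, along with the
 * L1TF mitigation mask and the default MMIO mask, based on the host's
 * physical address width.  kvm_mmu_set_ept_masks() overrides most of
 * these when EPT is in use.
 */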
void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask	= PT_USER_MASK;
	shadow_accessed_mask	= PT_ACCESSED_MASK;
	shadow_dirty_mask	= PT_DIRTY_MASK;
	shadow_nx_mask		= PT64_NX_MASK;
	shadow_x_mask		= 0;
	shadow_present_mask	= PT_PRESENT_MASK;
	shadow_acc_track_mask	= 0;
	shadow_me_mask		= sme_me_mask;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
	shadow_mmu_writable_mask  = DEFAULT_SPTE_MMU_WRITEABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}