/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
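/*
 * Worked example (illustration only): a 4 KiB page holds 2^9 = 512
 * eight-byte 64-bit PTEs, or 2^10 = 1024 four-byte 32-bit PTEs, hence
 * the 9-bit and 10-bit table indexes above.
 */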

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
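/*
 * Illustration: with SIZE = 4 and SHIFT = 13, the mask is 0xf << 13 ==
 * 0x1e000, i.e. PDE bits 16:13, which in a PSE-36 4 MiB mapping hold
 * physical-address bits 35:32.
 */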

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((1ULL << (e - s + 1)) - 1) << s;
}
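/*
 * Worked example (illustration only): rsvd_bits(3, 5) builds a mask with
 * bits 3..5 set: ((1ULL << (5 - 3 + 1)) - 1) << 3 == 0x7 << 3 == 0x38.
 * Callers in mmu.c typically use it as, e.g., rsvd_bits(maxphyaddr, 51)
 * to flag physical-address bits above the CPU's MAXPHYADDR as reserved.
 */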

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}
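/*
 * Illustration: the PCID lives in CR3 bits 11:0 (X86_CR3_PCID_MASK ==
 * 0xfff).  With CR4.PCIDE = 1 and CR3 = 0x12345005, kvm_get_pcid()
 * returns PCID 5; with CR4.PCIDE = 0 the PCID is always 0, as architected.
 */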

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
				 vcpu->arch.mmu->shadow_root_level);
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

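/*
 * Under CONFIG_RETPOLINE, indirect calls are significantly more expensive,
 * so the common case is devirtualized below: when TDP is in use,
 * ->page_fault always points at kvm_tdp_page_fault(), which can then be
 * called directly.
 */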
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages to synchronize guest modifications, b) the
 * second one is used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting shadow page tables between vCPUs, so it must run under the
 *    protection of mmu-lock.  The second case does not need to flush the TLB
 *    until the dirty bitmap is returned to userspace, since it only
 *    write-protects pages logged in the bitmap; that means no page in the
 *    dirty bitmap is missed, so the TLB can be flushed outside of mmu-lock.
 *
 * This creates a problem: the first case can encounter a stale TLB entry
 * left by the second case, which write-protects pages without flushing the
 * TLB immediately.  To make the first case aware of this, we make it flush
 * the TLB whenever it write-protects a spte whose SPTE_MMU_WRITEABLE bit is
 * set.  This works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever a spte is updated (only permission and status bits
 * are changed) we need to check whether a spte with SPTE_MMU_WRITEABLE set
 * has become read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte
 *   can be made writable on the MMU mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case.  Otherwise,
 * - when fixing a page fault on the spte, or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
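/*
 * A minimal sketch of the flush rule described above (illustration only;
 * SPTE_MMU_WRITEABLE and the real update logic live in mmu.c):
 *
 *	if ((old_spte & SPTE_MMU_WRITEABLE) &&
 *	    is_writable_pte(old_spte) && !is_writable_pte(new_spte))
 *		flush_needed = true;
 *
 * i.e. dropping write access from a spte that could have a writable TLB
 * entry must trigger a TLB flush.
 */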
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static inline bool kvm_mmu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}
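/*
 * Illustration: with cpuid_maxphyaddr(vcpu) == 36, any gpa at or above
 * 1ULL << 36 (64 GiB) sets reserved physical-address bits and is illegal.
 */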

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops.get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in PKRU.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for that domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
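/*
 * Worked example for the branchless SMAP logic above (illustration only):
 * a supervisor write with CPL = 0 and EFLAGS.AC = 1 has pfec =
 * PFERR_WRITE_MASK (bit 1).  Then smap = (0 - 3) & X86_EFLAGS_AC ==
 * X86_EFLAGS_AC, and smap >> (18 - 3 + 1) == 0x4 sets bit 2 of index,
 * i.e. the PFERR_RSVD position after the "pfec >> 1" shift, selecting the
 * permissions[] entry in which the SMAP check is overridden.
 */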

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif