/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((1ULL << (e - s + 1)) - 1) << s;
}
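
/*
 * A worked example (illustrative; example_* helpers are not part of this
 * header): with a hypothetical MAXPHYADDR of 52, physical-address bits
 * 52..62 of a 64-bit PTE are reserved, and rsvd_bits(52, 62) evaluates to
 * ((1ULL << 11) - 1) << 52 == 0x7ff0000000000000.
 */
static inline u64 example_high_pa_rsvd_mask(void)
{
	return rsvd_bits(52, 62);	/* mask with bits 52..62 set */
}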

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}
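
/*
 * A minimal sketch (illustrative; example_* is not part of this header) of
 * how the headroom value is typically consumed: reclaim shadow pages once
 * fewer than a minimum number remain.  KVM_MIN_FREE_MMU_PAGES comes from
 * kvm_host.h; the policy shown here is only an approximation of mmu.c.
 */
static inline bool example_needs_mmu_reclaim(struct kvm *kvm)
{
	return kvm_mmu_available_pages(kvm) < KVM_MIN_FREE_MMU_PAGES;
}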

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
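
/*
 * Illustrative only (example_* is not part of this header): the typical
 * calling pattern, modeled on vcpu_enter_guest(), reloads the MMU lazily
 * right before entering the guest.
 */
static inline int example_vcpu_pre_enter(struct kvm_vcpu *vcpu)
{
	int r = kvm_mmu_reload(vcpu);	/* no-op if root_hpa is valid */

	if (unlikely(r))
		return r;	/* a real caller would bail out to userspace */
	return 0;
}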

static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
	/* set_cr3() should ensure TLB has been flushed */
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
}

/*
 * There are two kinds of write-protection:
 * a) write-protecting a guest page so that guest modifications can be
 *    synchronized into the shadow page table, and
 * b) write-protecting pages to sync the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately, to avoid
 *    corrupting the shadow page table between vCPUs, so it must run under
 *    the protection of mmu-lock.  The second case does not need to flush
 *    the TLB until the dirty bitmap is returned to userspace: it only
 *    write-protects pages logged in the bitmap, so no page in the dirty
 *    bitmap is missed, and the TLB can be flushed outside of mmu-lock.
 *
 * This creates a problem: the first case can observe a stale TLB entry
 * left by the second case, which write-protects pages without flushing the
 * TLB immediately.  To make the first case aware of this, we make it flush
 * the TLB whenever it write-protects an spte whose SPTE_MMU_WRITEABLE bit
 * is set; this works because the second case never touches
 * SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever an spte is updated (only permission and status
 * bits change), we need to check whether an spte with SPTE_MMU_WRITEABLE
 * has become read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte
 *   can be made writable in the MMU mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case.  Otherwise,
 * - when fixing a page fault on the spte, or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
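
/*
 * A minimal sketch of the rules above (illustrative; example_* helpers are
 * not part of this header).  SPTE_MMU_WRITEABLE itself is defined in
 * mmu.c, so its value is passed in as a parameter here.
 */
static inline bool example_can_be_made_writable(u64 spte,
						u64 spte_mmu_writeable)
{
	/*
	 * Rule 1: "may a writable TLB entry exist / can the spte be made
	 * writable?" -> check SPTE_MMU_WRITEABLE (the common case).
	 */
	return spte & spte_mmu_writeable;
}

static inline bool example_writable_for_dirty_logging(u64 spte)
{
	/*
	 * Rule 2: page-fault fixing and dirty-logging write-protection
	 * check the hardware-writable bit via is_writable_pte().
	 */
	return is_writable_pte(spte);
}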

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 protection domains
		 * and 2 attribute bits per domain.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the domain's first bit.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
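
/*
 * Worked example for the index computation above (illustrative, not from
 * the original source): a supervisor write with CPL = 0, EFLAGS.AC = 1 and
 * pfec = PFERR_WRITE_MASK (bit 1) gives
 *
 *	smap  = (0 - 3) & X86_EFLAGS_AC             = 1 << 18
 *	index = (0x2 >> 1) + (smap >> (18 - 3 + 1)) = 1 + 4 = 5
 *
 * i.e. the SMAP-override flag lands exactly where the (shifted) PFERR_RSVD
 * bit would sit, selecting a permissions[] entry precomputed with SMAP
 * checks disabled.
 */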

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif