/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static __always_inline u64 rsvd_bits(int s, int e)
{
        BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

        if (__builtin_constant_p(e))
                BUILD_BUG_ON(e > 63);
        else
                e &= 63;

        if (e < s)
                return 0;

        return ((2ULL << (e - s)) - 1) << s;
}
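/*
 * Editor's illustration (not from the original source): rsvd_bits(s, e)
 * builds a mask with bits e:s (inclusive) set.  For example,
 * rsvd_bits(3, 7) == ((2ULL << 4) - 1) << 3 == 0xf8, i.e. bits 7:3.
 * An empty range (e < s, non-constant) yields 0, so a caller such as
 * rsvd_bits(maxphyaddr, 51) needs no special case when maxphyaddr > 51.
 */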
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
                             gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                          u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

        return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}
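/*
 * Editor's illustration (not from the original source): the PCID occupies
 * CR3 bits 11:0 (X86_CR3_PCID_MASK == 0xfff), which never overlap the
 * page-aligned root address; that is what the BUILD_BUG_ON above checks.
 * With CR4.PCIDE = 1 and CR3 = 0x12345001, kvm_get_pcid() returns 0x001;
 * with CR4.PCIDE = 0 it returns 0, since the current PCID is always 0
 * when PCIDs are disabled.
 */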
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
        u64 root_hpa = vcpu->arch.mmu->root_hpa;

        if (!VALID_PAGE(root_hpa))
                return;

        static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
                                          vcpu->arch.mmu->shadow_root_level);
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                       bool prefault);

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                        u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
        if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
                return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
        return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first sort
 * write-protects guest pages to synchronize guest modifications to the
 * shadow page tables, b) the second sort write-protects pages to sync the
 * dirty bitmap for KVM_GET_DIRTY_LOG.  The differences between the two
 * sorts are:
 * 1) the first case clears the MMU-writable bit.
 * 2) the first case requires an immediate TLB flush to keep vCPUs from
 *    corrupting the shadow page tables, so it must run under the
 *    protection of mmu_lock.  The second case does not need to flush the
 *    TLB until the dirty bitmap is returned to userspace: it only
 *    write-protects pages that are logged in the bitmap, so no page is
 *    missed from the dirty bitmap, and the TLB flush can happen outside
 *    of mmu_lock.
 *
 * This creates a problem: the first case can observe stale, writable TLB
 * entries left behind by the second case, which write-protects pages
 * without flushing the TLB immediately.  To make the first case aware of
 * the problem, it flushes the TLB whenever it write-protects an SPTE whose
 * MMU-writable bit is set; this works because the second case never
 * touches the MMU-writable bit.
 *
 * In any case, whenever an SPTE is updated (only permission and status
 * bits are changed) we need to check whether an SPTE with MMU-writable set
 * has become read-only and, if so, flush the TLB.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using MMU-writable and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the SPTE
 *   can be made writable in the MMU mapping, check MMU-writable; this is
 *   the common case.  Otherwise,
 * - to fix a page fault on the SPTE, or to write-protect it for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline bool is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
{
        int cpl = static_call(kvm_x86_get_cpl)(vcpu);
        unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

        /*
         * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
         *
         * If CPL = 3, SMAP applies to all supervisor-mode data accesses
         * (these are implicit supervisor accesses) regardless of the value
         * of EFLAGS.AC.
         *
         * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
         * the result in X86_EFLAGS_AC.  We then insert it in place of
         * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
         * but it will be one in index if SMAP checks are being overridden.
         * It is important to keep this branchless.
         */
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;

                /*
                 * PKRU defines 32 bits: there are 16 domains and 2
                 * attribute bits per domain in pkru.  pte_pkey is the
                 * index of the protection domain, so pte_pkey * 2 is
                 * the index of the first bit for the domain.
                 */
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
                         ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}
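/*
 * Editor's worked example (not from the original source): for a
 * supervisor-mode write with CPL = 0 and EFLAGS.AC = 1,
 * pfec = PFERR_PRESENT_MASK | PFERR_WRITE_MASK = 0x3 and
 * smap = (0 - 3) & X86_EFLAGS_AC = X86_EFLAGS_AC, so
 * index = (0x3 >> 1) + (X86_EFLAGS_AC >> 16) = 1 + 4 = 5, i.e. the
 * "write" bit plus the SMAP-override bit sitting in the PFERR_RSVD slot
 * of the permissions[] index.
 */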
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
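/*
 * Editor's sketch (assumption, not from this header): the comment in
 * kvm_memslots_have_rmaps() below implies a writer side that publishes
 * the rmap pointers before the flag, roughly:
 *
 *      // in alloc_all_memslots_rmaps(), per the pairing noted below
 *      slot->arch.rmap[i] = allocated_rmap_array;
 *      ...
 *      smp_store_release(&kvm->arch.memslots_have_rmaps, true);
 *
 * so that any reader that observes the flag via smp_load_acquire() also
 * observes fully initialized rmap pointers.
 */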
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
        /*
         * Read memslots_have_rmaps before the rmap pointers.  Hence, threads
         * reading memslots_have_rmaps in any lock context are guaranteed to
         * see the pointers.  Pairs with the smp_store_release() in
         * alloc_all_memslots_rmaps().
         */
        return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
}

#endif