#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
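/*
 * Worked example for rsvd_bits(): the helper builds a mask covering the
 * inclusive bit range [s, e].  For instance, rsvd_bits(3, 7) evaluates to
 * ((1ULL << 5) - 1) << 3 == 0xf8, i.e. bits 3..7 set.  The range here is
 * arbitrary and purely illustrative; callers pass whatever reserved-bit
 * range the active paging mode defines.
 */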
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real MMIO page fault; emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page
 *			fault path update the MMIO spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * Currently we have two sorts of write-protection: a) the first sort
 * write-protects guest pages to sync guest modifications, b) the second sort
 * is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The differences
 * between the two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page tables between vcpus, so it must run under
 *    the protection of mmu-lock.  The second case does not need to flush the
 *    TLB until the dirty bitmap is returned to userspace, since it only
 *    write-protects pages logged in the bitmap; such pages cannot be missed,
 *    so the TLB can be flushed outside of mmu-lock.
 *
 * This creates a problem: the first case can observe a corrupted TLB entry
 * caused by the second case, which write-protects pages without flushing the
 * TLB immediately.  To make the first case aware of this, we make it flush
 * the TLB whenever we try to write-protect an spte whose SPTE_MMU_WRITEABLE
 * bit is set.  This works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever an spte is updated (only permission and status bits
 * are changed) we need to check whether an spte with SPTE_MMU_WRITEABLE has
 * become read-only; if so, we need to flush the TLB.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether an spte may have a writable TLB entry, or whether the
 *   spte can be made writable in the MMU mapping, check SPTE_MMU_WRITEABLE;
 *   this covers the common case.  Otherwise,
 * - when fixing a page fault on the spte, or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC.  We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit is always zero in pfec, but
	 * it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	WARN_ON(pfec & PFERR_RSVD_MASK);

	return (mmu->permissions[index] >> pte_access) & 1;
}
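/*
 * Worked example of the branchless SMAP check in permission_fault() above,
 * spelled out with the actual bit positions (X86_EFLAGS_AC_BIT == 18,
 * PFERR_RSVD_BIT == 3, so the shift is 18 - 3 + 1 == 16):
 *
 * - CPL 0..2 with EFLAGS.AC set: cpl - 3 is negative, so all of its high
 *   bits (including bit 18) are set and smap == X86_EFLAGS_AC.  Then
 *   smap >> 16 moves bit 18 down to bit 2, which is exactly where
 *   PFERR_RSVD_MASK (bit 3) lands after the pfec >> 1 shift; the SMAP
 *   override thus occupies the index slot that is guaranteed to be unused.
 * - CPL 3, or EFLAGS.AC clear: smap == 0 and index == pfec >> 1, so SMAP
 *   is enforced normally.
 *
 * The arithmetic is restated here only for illustration; the code above is
 * authoritative.
 */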
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
#endif