#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

/* Build a mask with bits s..e (inclusive) set. */
static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
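/*
 * For example (illustrative values, not from the original source): with a
 * 40-bit MAXPHYADDR, the reserved physical-address bits of a 4-level pte
 * would be rsvd_bits(40, 51) == 0xfff0000000000ULL, i.e.
 * ((1ULL << 12) - 1) << 40.
 */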
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault; emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
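/*
 * A minimal sketch (illustrative only, not the actual call sites) of how a
 * caller might act on the return values above, assuming
 * x86_emulate_instruction() and kvm_mmu_page_fault() as the fallback entry
 * points:
 *
 *	int ret = handle_mmio_page_fault_common(vcpu, addr, direct);
 *
 *	if (ret == RET_MMIO_PF_EMULATE)
 *		return x86_emulate_instruction(vcpu, addr, 0, NULL, 0) ==
 *		       EMULATE_DONE;
 *	if (ret == RET_MMIO_PF_INVALID)
 *		return kvm_mmu_page_fault(vcpu, addr, 0, NULL, 0);
 *	if (ret == RET_MMIO_PF_RETRY)
 *		return 1;
 *	WARN_ON(1);
 */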
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
		bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
		bool ept);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages to synchronize guest modifications, b) the
 * second one is used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the tlb immediately to avoid
 * corrupting the shadow page table between vcpus, so it must run under the
 * protection of mmu-lock.  The second case does not need to flush the tlb
 * until the dirty bitmap is returned to userspace, since it only
 * write-protects pages logged in the bitmap; no page in the dirty bitmap is
 * missed, so it can flush the tlb outside of mmu-lock.
 *
 * This creates a problem: the first case can observe a corrupted tlb caused
 * by the second case, which write-protects pages without flushing the tlb
 * immediately.  To make the first case aware of this, we let it flush the
 * tlb whenever it write-protects a spte whose SPTE_MMU_WRITEABLE bit is
 * set; this works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever a spte is updated (only permission and status bits
 * are changed) we need to check whether a spte with SPTE_MMU_WRITEABLE has
 * become readonly; if so, the tlb must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK (see the
 * sketch after is_write_protection() below):
 * - to check whether the spte has a writable tlb entry, or whether the spte
 *   can be made writable in the mmu mapping, check SPTE_MMU_WRITEABLE; this
 *   is the common case.  Otherwise,
 * - when fixing a page fault on the spte, or when write-protecting for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
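/*
 * A hedged sketch of the rule above, loosely modeled on spte_write_protect()
 * in mmu.c (simplified here; SPTE_MMU_WRITEABLE is defined in mmu.c, not in
 * this header).  pt_protect corresponds to case a), which clears
 * SPTE_MMU_WRITEABLE under mmu-lock; dirty logging, case b), leaves that
 * bit untouched:
 *
 *	if (pt_protect)
 *		spte &= ~SPTE_MMU_WRITEABLE;
 *	spte &= ~PT_WRITABLE_MASK;
 *	flush |= mmu_spte_update(sptep, spte);
 */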
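/*
 * A worked example (illustrative values) of the branchless SMAP check in
 * permission_fault() below: for a supervisor write with
 * pfec = PFERR_PRESENT_MASK | PFERR_WRITE_MASK = 0x3, pfec >> 1 = 1.
 * With CPL = 0 and EFLAGS.AC = 1, smap = (0 - 3) & X86_EFLAGS_AC = 0x40000,
 * so smap >> 16 = 4 and index = 5, selecting the permission bitmap slot
 * that PFERR_RSVD_MASK would select; with EFLAGS.AC = 0, smap = 0 and
 * index = 1.
 */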
/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in the X86_EFLAGS_AC bit.  We then insert it in place
	 * of the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	return (mmu->permissions[index] >> pte_access) & 1;
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
#endif