/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equivalent large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* Holds the gfn of each spte inside spt. */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

/*
 * Get the shadow page that owns the page table located at the given host
 * physical address.  The association is stored in the backing struct page's
 * private field when the shadow page is allocated.
 */
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

/* Get the shadow page that contains the given spte. */
static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

/* SMM shadow pages belong to their own memslot address space (id 1). */
static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return sp->role.smm ? 1 : 0;
}
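
/*
 * Illustrative sketch, not code taken from any caller in this file: the
 * helpers above are typically chained to walk from a shadow PTE pointer back
 * to its owning shadow page and that page's memslot address space, e.g.
 *
 *	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *	int as_id = kvm_mmu_page_as_id(sp);
 *	struct kvm_memslots *slots = __kvm_memslots(kvm, as_id);
 *
 * The variable names are placeholders; only sptep_to_sp(),
 * kvm_mmu_page_as_id() and __kvm_memslots() are real interfaces.
 */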

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}

bool is_nx_huge_page_enabled(void);
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

/*
 * Take a reference on a shadow page that is serving as a root.  The root
 * must already be in use, i.e. have a non-zero root_count.
 */
static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	BUG_ON(!sp->root_count);
	lockdep_assert_held(&kvm->mmu_lock);

	++sp->root_count;
}

/*
 * Drop a reference on a root shadow page.  Returns true if that was the last
 * reference, in which case the caller is responsible for freeing the root.
 */
static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held(&kvm->mmu_lock);
	--sp->root_count;

	return !sp->root_count;
}

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault() and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)

int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
			      gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */