/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif
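
/*
 * Note (illustrative): with CONFIG_KVM_PROVE_MMU the assertion is evaluated
 * at runtime via WARN_ON_ONCE(); otherwise BUILD_BUG_ON_INVALID() merely
 * type-checks the expression, so the macro compiles away to nothing.
 */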

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))
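
/*
 * Worked example (illustrative only): 64-bit/long-mode page tables translate
 * 9 bits per level, so __PT_LEVEL_SHIFT(1, 9) == 12, __PT_LEVEL_SHIFT(2, 9)
 * == 21, __PT_LEVEL_SHIFT(3, 9) == 30 and __PT_LEVEL_SHIFT(4, 9) == 39;
 * __PT_INDEX(addr, 2, 9) extracts address bits 29:21; and
 * __PT_ENT_PER_PAGE(9) == 512 entries per table.  32-bit non-PAE guest page
 * tables use 10 bits per level, i.e. 1024 entries per table.
 */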

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
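
/*
 * Illustrative usage (a sketch, not a specific call site): PAE root slots
 * holding INVALID_PAE_ROOT are simply skipped, e.g.
 *
 *	if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
 *		continue;
 */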
38c834e5e4SSean Christopherson
kvm_mmu_get_dummy_root(void)390e3223d8SSean Christopherson static inline hpa_t kvm_mmu_get_dummy_root(void)
400e3223d8SSean Christopherson {
410e3223d8SSean Christopherson return my_zero_pfn(0) << PAGE_SHIFT;
420e3223d8SSean Christopherson }
430e3223d8SSean Christopherson
kvm_mmu_is_dummy_root(hpa_t shadow_page)440e3223d8SSean Christopherson static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
450e3223d8SSean Christopherson {
460e3223d8SSean Christopherson return is_zero_pfn(shadow_page >> PAGE_SHIFT);
470e3223d8SSean Christopherson }
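
/*
 * Note (illustrative): the dummy root is backed by the kernel's shared zero
 * page, giving KVM a physically valid root to load when no real shadow root
 * is needed, without allocating a struct kvm_mmu_page.
 * kvm_mmu_is_dummy_root() lets callers recognize such roots before assuming
 * a backing shadow page exists.
 */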

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits. Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 * See the illustrative decode sketch below this struct definition.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
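
/*
 * Illustrative decode of a shadowed_translation entry (a sketch, assuming
 * the KVM ACC_* format described above; the authoritative accessors live in
 * mmu.c):
 *
 *	gfn    = sp->shadowed_translation[index] >> PAGE_SHIFT;
 *	access = sp->shadowed_translation[index] & ACC_ALL;
 */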

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
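
/*
 * Note (illustrative): the "as_id" is the memslot address space index; x86
 * KVM has only two, with SMM memory (role.smm == 1) tracked separately as
 * address space 1.
 */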

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
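
/*
 * e.g. KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512, so gfn_round_for_level(gfn,
 * PG_LEVEL_2M) clears the low nine bits of the gfn, rounding it down to the
 * start of its 2MiB region (illustrative values for x86's 512-entry levels).
 */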

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}
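
/*
 * Note (illustrative): nx_huge_pages is the kvm module parameter; the per-VM
 * opt-out in kvm->arch.disable_nx_huge_pages is set via the
 * KVM_CAP_VM_DISABLE_NX_HUGE_PAGES capability.
 */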

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values. Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
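
/*
 * Illustrative check (a sketch, some_fault_step() is hypothetical): because
 * RET_PF_CONTINUE is 0 and every "stop" value is non-zero, fault handlers
 * can bail with a plain non-zero test:
 *
 *	r = some_fault_step(vcpu, fault);
 *	if (r != RET_PF_CONTINUE)
 *		return r;
 */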

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch, int *emulation_type)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
	 * guest perspective and have already been counted at the time of the
	 * original fault.
	 */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	/*
	 * Similar to above, prefetch faults aren't truly spurious, and the
	 * async #PF path doesn't do emulation. Do count faults that are fixed
	 * by the async #PF handler though, otherwise they'll never be counted.
	 */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;
	return r;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */