/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
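
/*
 * Example: with 8-byte 64-bit PTEs a 4KiB page table holds 1 << 9 = 512
 * entries; with 4-byte 32-bit PTEs it holds 1 << 10 = 1024 entries.
 */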

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
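
/*
 * Example: PT32_DIR_PSE36_MASK covers the four PSE-36 bits 13..16 of a
 * 32-bit PDE: ((1ULL << 4) - 1) << 13 == 0x1e000.
 */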

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}

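/*
 * Example: rsvd_bits(52, 62) builds a mask of bits 52..62 inclusive:
 * ((2ULL << 10) - 1) << 52 == 0x7ff0000000000000.  Writing 2ULL << (e - s)
 * instead of 1ULL << (e - s + 1) keeps rsvd_bits(0, 63) well defined,
 * since a 64-bit shift count would be undefined behavior.
 */
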
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

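/*
 * Example: with CR4.PCIDE = 1 and CR3 = 0x12345007, kvm_get_pcid() returns
 * the low 12 bits, 0x007; with CR4.PCIDE = 0 it always returns 0.
 */
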
static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->shadow_root_level);
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva.  */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn.  */
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;
};

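/*
 * Illustration of how the level fields interact (a sketch, not taken from
 * this header): a fault on a gfn backed by a 2MiB host mapping starts with
 * max_level = KVM_MAX_HUGEPAGE_LEVEL, is narrowed to req_level = PG_LEVEL_2M
 * by the host mapping, and ends with goal_level = PG_LEVEL_2M unless
 * huge_page_disallowed forces the final mapping down to PG_LEVEL_4K.
 */
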
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
#ifdef CONFIG_RETPOLINE
	if (fault.is_tdp)
		return kvm_tdp_page_fault(vcpu, &fault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, &fault);
}

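/*
 * Example: a user-mode write to a present page arrives with
 * err == PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK, so the
 * initializer above sets fault.present, fault.write and fault.user while
 * fault.exec and fault.rsvd remain false.
 */
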
/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies only to supervisor accesses; for user accesses,
	 * not_smap may be either set or clear and has no bearing on the
	 * result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
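		/*
		 * Example: for pte_pkey = 1, the shift below extracts PKRU
		 * bits 2 (AD) and 3 (WD), the two attribute bits of domain 1.
		 */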
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
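
/*
 * Example: KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) is 9, so
 * gfn_to_index(0x600, 0x400, PG_LEVEL_2M) == (0x600 >> 9) - (0x400 >> 9)
 * == 3 - 2 == 1: the gfn falls in the slot's second 2MiB-aligned region.
 */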

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
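
/*
 * Example: a slot with base_gfn = 0x800 and npages = 0x1000 covers gfns
 * 0x800..0x17ff, so kvm_mmu_slot_lpages(slot, PG_LEVEL_2M) returns
 * (0x17ff >> 9) - (0x800 >> 9) + 1 == 11 - 4 + 1 == 8 2MiB regions.
 */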

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif