xref: /openbmc/linux/arch/x86/kvm/mmu.h (revision 41d6af119206e98764b4ae6d264d63acefcf851e)
1edf88417SAvi Kivity #ifndef __KVM_X86_MMU_H
2edf88417SAvi Kivity #define __KVM_X86_MMU_H
3edf88417SAvi Kivity 
4edf88417SAvi Kivity #include <linux/kvm_host.h>
5edf88417SAvi Kivity 
/* Index bits and entries per level for 64-bit vs. 32-bit page tables. */
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

/* Architectural x86 page-table entry bits, common to all paging formats. */
#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)		/* page-level write-through */
#define PT_PCD_MASK (1ULL << 4)		/* page-level cache disable */
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)	/* PS bit in directory entries */
#define PT_PAT_MASK (1ULL << 7)		/* PAT shares bit 7 in 4K PTEs */
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)	/* no-execute (64-bit/PAE entries) */

#define PT_PAT_SHIFT 7
/* In large-page (directory) entries the PAT bit moves to bit 12. */
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

/* PSE-36: bits 13-16 of a 32-bit large-page PDE extend the physical address. */
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Root depth: 4-level (long mode), 2-level (legacy 32-bit), 3-level (PAE). */
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
398c6d6adcSSheng Yang 
40edf88417SAvi Kivity static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
41edf88417SAvi Kivity {
42edf88417SAvi Kivity 	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
43edf88417SAvi Kivity 		__kvm_mmu_free_some_pages(vcpu);
44edf88417SAvi Kivity }
45edf88417SAvi Kivity 
46edf88417SAvi Kivity static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
47edf88417SAvi Kivity {
48edf88417SAvi Kivity 	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
49edf88417SAvi Kivity 		return 0;
50edf88417SAvi Kivity 
51edf88417SAvi Kivity 	return kvm_mmu_load(vcpu);
52edf88417SAvi Kivity }
53edf88417SAvi Kivity 
/*
 * Is the guest running in long mode?  Checks EFER.LMA (long mode active),
 * not EFER.LME, so it is true only once paging is enabled as well.
 * Always false on 32-bit hosts, which cannot run long-mode guests.
 */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifndef CONFIG_X86_64
	return 0;
#else
	return vcpu->arch.shadow_efer & EFER_LMA;
#endif
}
62edf88417SAvi Kivity 
63edf88417SAvi Kivity static inline int is_pae(struct kvm_vcpu *vcpu)
64edf88417SAvi Kivity {
65edf88417SAvi Kivity 	return vcpu->arch.cr4 & X86_CR4_PAE;
66edf88417SAvi Kivity }
67edf88417SAvi Kivity 
68edf88417SAvi Kivity static inline int is_pse(struct kvm_vcpu *vcpu)
69edf88417SAvi Kivity {
70edf88417SAvi Kivity 	return vcpu->arch.cr4 & X86_CR4_PSE;
71edf88417SAvi Kivity }
72edf88417SAvi Kivity 
73edf88417SAvi Kivity static inline int is_paging(struct kvm_vcpu *vcpu)
74edf88417SAvi Kivity {
75edf88417SAvi Kivity 	return vcpu->arch.cr0 & X86_CR0_PG;
76edf88417SAvi Kivity }
77edf88417SAvi Kivity 
78edf88417SAvi Kivity #endif
79