xref: /openbmc/linux/arch/x86/kvm/mmu.h (revision b6dcefde)
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>

/*
 * Page table geometry: number of index bits and entries per table page
 * for 64-bit (4-level, 512 entries) and legacy 32-bit (1024 entries) paging.
 */
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

/*
 * Architectural x86 page-table-entry bits (see Intel SDM Vol. 3, "Paging").
 * Note bit 7 is overloaded by hardware: PS (page size) in directory
 * entries, PAT in 4K leaf PTEs — hence two masks with the same value.
 */
#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

/* PAT bit position in large-page directory entries differs from 4K PTEs. */
#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

/*
 * PSE-36: in a 32-bit PDE for a 4M page, bits 13-16 carry physical
 * address bits above 32 (see Intel SDM, PSE-36 paging extension).
 */
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Root page-table depth for each paging mode: 4-level, 2-level, PAE 3-level. */
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

/*
 * Walk the shadow page table for @addr; sptes[] receives one entry per
 * level (up to 4). NOTE(review): return-value semantics (presumably the
 * number of levels walked) are defined in mmu.c — confirm at call sites.
 */
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
42 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
43 {
44 	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
45 		__kvm_mmu_free_some_pages(vcpu);
46 }
47 
48 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
49 {
50 	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
51 		return 0;
52 
53 	return kvm_mmu_load(vcpu);
54 }
55 
/*
 * Nonzero when the guest is running in long mode (EFER.LMA set).
 * A 32-bit host cannot run a long-mode guest, so this is constant 0 there.
 */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifndef CONFIG_X86_64
	return 0;
#else
	return vcpu->arch.shadow_efer & EFER_LMA;
#endif
}
64 
65 static inline int is_pae(struct kvm_vcpu *vcpu)
66 {
67 	return vcpu->arch.cr4 & X86_CR4_PAE;
68 }
69 
70 static inline int is_pse(struct kvm_vcpu *vcpu)
71 {
72 	return vcpu->arch.cr4 & X86_CR4_PSE;
73 }
74 
75 static inline int is_paging(struct kvm_vcpu *vcpu)
76 {
77 	return vcpu->arch.cr0 & X86_CR0_PG;
78 }
79 
80 static inline int is_present_gpte(unsigned long pte)
81 {
82 	return pte & PT_PRESENT_MASK;
83 }
84 
85 #endif
86