#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

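/*
 * Discard any exception queued for injection into the guest, e.g. when
 * userspace replaces the vCPU's pending event state.
 */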
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

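/*
 * Queue an interrupt for injection. 'soft' distinguishes software
 * interrupts raised by the INT n instruction, which need the length of
 * that instruction for injection, from external hardware interrupts.
 */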
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

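/*
 * True if an exception, interrupt, or NMI is still pending delivery
 * (e.g. its injection was interrupted by a VM-exit), in which case it
 * should be reinjected before any new event is considered.
 */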
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

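/*
 * #BP (INT3) and #OF (INTO) are the software exceptions: they are raised
 * by executing an instruction, so injecting them requires the length of
 * that instruction.
 */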
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

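/*
 * The CPU is in 64-bit mode only when long mode is active (EFER.LMA) and
 * the current code segment has CS.L set; long mode with CS.L clear is
 * 32-bit compatibility mode.
 */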
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

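/*
 * True when guest virtual addresses are walked through the nested MMU,
 * i.e. a nested (L2) guest is currently running.
 */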
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

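/*
 * Build a single-bit mask from a bit number, masking the index to the
 * low 32 bits; for example, bit(3) and bit(35) both yield 0x8.
 */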
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

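/*
 * Cache the translation of the last emulated MMIO access so that another
 * access to the same page can skip the page-table walk. The memslot
 * generation is recorded so the cached entry becomes stale automatically
 * when the memslots change.
 */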
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

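/*
 * Outside 64-bit mode only the low 32 bits of a general-purpose register
 * are architecturally visible, so these helpers truncate reads and
 * writes accordingly.
 */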
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	kvm_register_write(vcpu, reg, val);
}

static inline u64 get_kernel_ns(void)
{
	return ktime_get_boot_ns();
}

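/*
 * Quirks are enabled by default; userspace can opt out of individual
 * quirks via KVM_CAP_DISABLE_QUIRKS, which sets bits in disabled_quirks.
 */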
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

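/*
 * XSAVE state components KVM can expose to guests: x87, SSE, AVX, MPX
 * bound registers/config, AVX-512, and protection keys. The set actually
 * offered is further limited by what the host enables in XCR0.
 */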
#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

/*
 * Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
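
/*
 * Note that divl faults (#DE) if the quotient overflows 32 bits, so
 * callers must ensure n < base. A sketch with hypothetical values:
 *
 *	u32 n = 1;
 *	u32 rem = do_shl32_div32(n, 3);
 *	// n is now 1431655765 (2^32 / 3), rem is 1
 */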

#endif /* ARCH_X86_KVM_X86_H */