xref: /openbmc/linux/arch/x86/kvm/x86.h (revision 2359ccdd)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

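/*
 * Defaults for pause-loop exiting (VMX) / pause filtering (SVM).  "gap" is
 * the largest TSC delta between two PAUSEs that is still treated as part of
 * the same spin loop; "window" is how long a vCPU may spin before KVM forces
 * an exit.  The grow/shrink values tune the window at run time, see
 * __grow_ple_window() and __shrink_ple_window() below.
 */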
#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

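/*
 * Grow the spin window after a pause-loop exit.  Example with the VMX
 * defaults above: val = 4096, base = 4096, modifier = 2, max = UINT_MAX
 * doubles the window to 8192; the result is always clamped to "max".
 */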
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

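/*
 * Shrink the spin window, the counterpart to __grow_ple_window().  With the
 * default shrink modifier of 0 the window snaps straight back to "base"; a
 * modifier of 2 would halve "val" instead, floored at "min".
 */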
static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

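/*
 * Power-on default for IA32_PAT: entries 0-3 are WB, WT, UC- and UC
 * (0x06, 0x04, 0x07, 0x00), repeated for entries 4-7.
 */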
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

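/*
 * Note on the exception bookkeeping: "pending" means an exception has been
 * queued but not yet delivered to the guest (it may still be intercepted,
 * e.g. by a nested hypervisor), while "injected" means it has already been
 * put into the VMCS/VMCB and must be preserved for re-injection if delivery
 * is interrupted by a VM-exit.
 */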
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

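/*
 * #BP and #OF are "soft" exceptions: they are raised by executing INT3/INTO,
 * so re-injecting them requires the length of the originating instruction.
 */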
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

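/*
 * True only for 64-bit mode proper: EFER.LMA set *and* CS.L set.  With LMA
 * set but CS.L clear the vCPU is in compatibility mode.
 */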
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

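/*
 * Sign-extend a linear address from the implemented width (48 bits, or 57
 * with LA57) to 64 bits.  E.g. for 48-bit addressing, 0x0000800000000000
 * becomes 0xffff800000000000; an address is canonical iff this is a no-op.
 */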
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

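/*
 * The match helpers below hit only when the cached generation is current and
 * the cached gva/gfn is non-zero and equal to the address being probed, so
 * stale entries are ignored automatically after a memslot change.
 */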
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

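/*
 * kvm_register_readl/writel truncate the value to 32 bits outside 64-bit
 * mode, so emulation neither consumes nor leaks the upper half of a GPR.
 */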
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

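/*
 * XSAVE state components KVM is willing to expose to guests (x87/SSE/AVX,
 * MPX, AVX-512 and PKRU); kvm_supported_xcr0() narrows this further based
 * on what the host actually supports (host_xcr0).
 */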
#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern bool enable_vmware_backdoor;

extern struct static_key kvm_no_apic_vcpu;

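/*
 * Convert a nanosecond interval into guest TSC cycles using the vCPU's
 * virtual TSC frequency (the mult/shift pair set up when the TSC is
 * configured), e.g. roughly 1000 cycles per 1000 ns for a 1 GHz guest TSC.
 */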
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
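/*
 * Example: do_shl32_div32(n, 3) with n == 1 divides (1ULL << 32) by 3,
 * leaving n == 0x55555555 and evaluating to the remainder, 1.
 */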
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

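/*
 * Bits userspace may set via KVM_CAP_X86_DISABLE_EXITS to let the guest
 * execute MWAIT, HLT or PAUSE without a VM-exit; they feed the per-VM
 * *_in_guest flags tested by the helpers below.
 */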
#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
                                              KVM_X86_DISABLE_EXITS_HTL | \
                                              KVM_X86_DISABLE_EXITS_PAUSE)

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

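/*
 * current_vcpu is set around host interrupt handling that occurs while a
 * guest is (or was just) running, so that e.g. the perf NMI handler can
 * attribute samples to the guest.
 */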
DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

#endif