/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "sev.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

#include CONFIG_DEVICES

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
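
/*
 * Size check: struct kvm_msrs is an 8-byte header (nmsrs plus padding)
 * and each struct kvm_msr_entry is 16 bytes (index, reserved, data), so
 * 8 + 255 * 16 = 4088 bytes, which indeed fits in a 4096-byte buffer.
 */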

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(XSAVE),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(MCE),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_xcrs;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
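
/*
 * MEMORIZE() caches per call site: the function-local static '_memorized'
 * makes the first call evaluate 'fn' and store the result in '_result';
 * every later call hits the 'return _result;' inside the macro, i.e. it
 * returns from the *enclosing* function with the cached value.
 * kvm_enable_x2apic() below relies on exactly this, with the file-scope
 * 'has_x2apic_api' acting as the cache variable.
 */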

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

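/*
 * Read the guest TSC (MSR_IA32_TSC) into env->tsc. While the VM is not
 * running the value cannot change, so env->tsc_valid is set to cache it;
 * cpu_update_state() clears the flag again when the VM resumes.
 */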
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

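/*
 * Ask KVM for up to 'max' supported CPUID entries. KVM_GET_SUPPORTED_CPUID
 * fails with -E2BIG when the buffer is too small; a completely full buffer
 * (nent >= max) is treated the same way, since more entries may exist.
 * Callers react to a NULL return by retrying with a larger buffer.
 */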
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

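/*
 * The family 6 model numbers checked below are the Haswell parts with
 * broken TSX: model 63 is Haswell-EP/EX (fixed from stepping 4 onwards),
 * and models 60, 69 and 70 are the desktop, ULT and Crystal Well variants.
 */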
static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization. */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization. */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization. */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call. QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

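/*
 * Queue an exception for delivery through KVM. When the kernel supports
 * exception payloads, the payload (the pending DR6 or CR2 value) is kept
 * alongside the pending exception and delivered by KVM itself; on older
 * kernels the payload is applied to dr[6]/cr[2] here and the exception is
 * marked as already injected.
 */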
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}
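
/*
 * Example: with a host TSC of 2,000,000 kHz (2 GHz), 250 ppm works out to
 * 2,000,000 * 250 / 1,000,000 = 500 kHz of slack, so any target frequency
 * between 1,999,500 and 2,000,500 kHz is considered NTP-correctable
 * without hardware TSC scaling.
 */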
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
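
/*
 * The eVMCS version is encoded as a range: the low byte is the minimum and
 * the high byte the maximum supported version, so (1 << 8) | 1 == 0x0101
 * advertises exactly version 1. See evmcs_version_supported() below for
 * the matching decode.
 */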

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
                     HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
                     HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};

static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                         KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and that the configuration is sane
 * (all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu)) {
        return true;
    }

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
        return true;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
                     HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
             cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
             (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
                  HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
                 HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}
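
/*
 * Example: DEFAULT_EVMCS_VERSION is 0x0101, i.e. the range [1..1]. A
 * kernel reporting 0x0201 (min 1, max 2) passes the check, because [1..1]
 * lies within [1..2]; a kernel reporting 0x0202 (min 2) would fail it.
 */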
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}
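
/*
 * Note: for KVM_CAP_XSAVE2 the KVM_CHECK_EXTENSION return value is the
 * buffer size in bytes required by KVM_GET_XSAVE2, which is why
 * 'has_xsave2' is used both as a boolean and, aligned up to 4K, as the
 * XSAVE buffer length above.
 */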

static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}
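
/*
 * Setting vmxon_pa/vmcs12_pa to -1ull above marks "no VMXON region, no
 * current VMCS": the freshly initialized nested state describes a vCPU
 * that is not in VMX operation.
 */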
1701
kvm_arch_init_vcpu(CPUState * cs)1702 int kvm_arch_init_vcpu(CPUState *cs)
1703 {
1704 struct {
1705 struct kvm_cpuid2 cpuid;
1706 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
1707 } cpuid_data;
1708 /*
1709 * The kernel defines these structs with padding fields so there
1710 * should be no extra padding in our cpuid_data struct.
1711 */
1712 QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
1713 sizeof(struct kvm_cpuid2) +
1714 sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
1715
1716 X86CPU *cpu = X86_CPU(cs);
1717 CPUX86State *env = &cpu->env;
1718 uint32_t limit, i, j, cpuid_i;
1719 uint32_t unused;
1720 struct kvm_cpuid_entry2 *c;
1721 uint32_t signature[3];
1722 int kvm_base = KVM_CPUID_SIGNATURE;
1723 int max_nested_state_len;
1724 int r;
1725 Error *local_err = NULL;
1726
1727 memset(&cpuid_data, 0, sizeof(cpuid_data));
1728
1729 cpuid_i = 0;
1730
1731 has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
1732
1733 r = kvm_arch_set_tsc_khz(cs);
1734 if (r < 0) {
1735 return r;
1736 }
1737
1738 /* vcpu's TSC frequency is either specified by user, or following
1739 * the value used by KVM if the former is not present. In the
1740 * latter case, we query it from KVM and record in env->tsc_khz,
1741 * so that vcpu's TSC frequency can be migrated later via this field.
1742 */
1743 if (!env->tsc_khz) {
1744 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
1745 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
1746 -ENOTSUP;
1747 if (r > 0) {
1748 env->tsc_khz = r;
1749 }
1750 }
1751
1752 env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
1753
1754 /*
1755 * kvm_hyperv_expand_features() is called here for the second time in case
1756 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
1757 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
1758 * check which Hyper-V enlightenments are supported and which are not, we
1759 * can still proceed and check/expand Hyper-V enlightenments here so legacy
1760 * behavior is preserved.
1761 */
1762 if (!kvm_hyperv_expand_features(cpu, &local_err)) {
1763 error_report_err(local_err);
1764 return -ENOSYS;
1765 }
1766
1767 if (hyperv_enabled(cpu)) {
1768 r = hyperv_init_vcpu(cpu);
1769 if (r) {
1770 return r;
1771 }
1772
1773 cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
1774 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
1775 has_msr_hv_hypercall = true;
1776 }
1777
1778 if (cs->kvm_state->xen_version) {
1779 #ifdef CONFIG_XEN_EMU
1780 struct kvm_cpuid_entry2 *xen_max_leaf;
1781
1782 memcpy(signature, "XenVMMXenVMM", 12);
1783
1784 xen_max_leaf = c = &cpuid_data.entries[cpuid_i++];
1785 c->function = kvm_base + XEN_CPUID_SIGNATURE;
1786 c->eax = kvm_base + XEN_CPUID_TIME;
1787 c->ebx = signature[0];
1788 c->ecx = signature[1];
1789 c->edx = signature[2];
1790
1791 c = &cpuid_data.entries[cpuid_i++];
1792 c->function = kvm_base + XEN_CPUID_VENDOR;
1793 c->eax = cs->kvm_state->xen_version;
1794 c->ebx = 0;
1795 c->ecx = 0;
1796 c->edx = 0;
1797
1798 c = &cpuid_data.entries[cpuid_i++];
1799 c->function = kvm_base + XEN_CPUID_HVM_MSR;
1800 /* Number of hypercall-transfer pages */
1801 c->eax = 1;
1802 /* Hypercall MSR base address */
1803 if (hyperv_enabled(cpu)) {
1804 c->ebx = XEN_HYPERCALL_MSR_HYPERV;
1805 kvm_xen_init(cs->kvm_state, c->ebx);
1806 } else {
1807 c->ebx = XEN_HYPERCALL_MSR;
1808 }
1809 c->ecx = 0;
1810 c->edx = 0;
1811
1812 c = &cpuid_data.entries[cpuid_i++];
1813 c->function = kvm_base + XEN_CPUID_TIME;
1814 c->eax = ((!!tsc_is_stable_and_known(env) << 1) |
1815 (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2));
1816 /* default=0 (emulate if necessary) */
1817 c->ebx = 0;
1818 /* guest tsc frequency */
1819 c->ecx = env->user_tsc_khz;
1820 /* guest tsc incarnation (migration count) */
1821 c->edx = 0;
1822
1823 c = &cpuid_data.entries[cpuid_i++];
1824 c->function = kvm_base + XEN_CPUID_HVM;
1825 xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM;
1826 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) {
1827 c->function = kvm_base + XEN_CPUID_HVM;
1828
1829 if (cpu->xen_vapic) {
1830 c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
1831 c->eax |= XEN_HVM_CPUID_X2APIC_VIRT;
1832 }
1833
1834 c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
1835
1836 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) {
1837 c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
1838 c->ebx = cs->cpu_index;
1839 }
1840
1841 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) {
1842 c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR;
1843 }
1844 }
1845
1846 r = kvm_xen_init_vcpu(cs);
1847 if (r) {
1848 return r;
1849 }
1850
1851 kvm_base += 0x100;
1852 #else /* CONFIG_XEN_EMU */
1853 /* This should never happen as kvm_arch_init() would have died first. */
1854 fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n");
1855 abort();
1856 #endif
1857 } else if (cpu->expose_kvm) {
1858 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
1859 c = &cpuid_data.entries[cpuid_i++];
1860 c->function = KVM_CPUID_SIGNATURE | kvm_base;
1861 c->eax = KVM_CPUID_FEATURES | kvm_base;
1862 c->ebx = signature[0];
1863 c->ecx = signature[1];
1864 c->edx = signature[2];
1865
1866 c = &cpuid_data.entries[cpuid_i++];
1867 c->function = KVM_CPUID_FEATURES | kvm_base;
1868 c->eax = env->features[FEAT_KVM];
1869 c->edx = env->features[FEAT_KVM_HINTS];
1870 }
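/*
 * Resulting layout, as a sketch: with Hyper-V off, leaf 0x40000000
 * holds the "KVMKVMKVM\0\0\0" signature and points eax at leaf
 * 0x40000001, which carries the FEAT_KVM/FEAT_KVM_HINTS bits. With
 * Hyper-V enabled, kvm_base is 0x40000100 and both leaves shift up
 * by the same amount.
 */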
1871
1872 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
1873
1874 if (cpu->kvm_pv_enforce_cpuid) {
1875 r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
1876 if (r < 0) {
1877 fprintf(stderr,
1878 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
1879 strerror(-r));
1880 abort();
1881 }
1882 }
1883
1884 for (i = 0; i <= limit; i++) {
1885 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1886 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
1887 abort();
1888 }
1889 c = &cpuid_data.entries[cpuid_i++];
1890
1891 switch (i) {
1892 case 2: {
1893 /* Keep reading function 2 until all of its output has been received */
1894 int times;
1895
1896 c->function = i;
1897 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
1898 KVM_CPUID_FLAG_STATE_READ_NEXT;
1899 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1900 times = c->eax & 0xff;
1901
1902 for (j = 1; j < times; ++j) {
1903 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1904 fprintf(stderr, "cpuid_data is full, no space for "
1905 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
1906 abort();
1907 }
1908 c = &cpuid_data.entries[cpuid_i++];
1909 c->function = i;
1910 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
1911 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1912 }
1913 break;
1914 }
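/*
 * Worked example for the loop above: if CPUID.2 reports eax =
 * 0x00000001 (the usual value on modern CPUs), the low byte says the
 * leaf need only be read once, so zero extra iterations run.
 */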
1915 case 0x1f:
1916 if (env->nr_dies < 2) {
1917 cpuid_i--;
1918 break;
1919 }
1920 /* fallthrough */
1921 case 4:
1922 case 0xb:
1923 case 0xd:
1924 for (j = 0; ; j++) {
1925 if (i == 0xd && j == 64) {
1926 break;
1927 }
1928
1929 if (i == 0x1f && j == 64) {
1930 break;
1931 }
1932
1933 c->function = i;
1934 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1935 c->index = j;
1936 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1937
1938 if (i == 4 && c->eax == 0) {
1939 break;
1940 }
1941 if (i == 0xb && !(c->ecx & 0xff00)) {
1942 break;
1943 }
1944 if (i == 0x1f && !(c->ecx & 0xff00)) {
1945 break;
1946 }
1947 if (i == 0xd && c->eax == 0) {
1948 continue;
1949 }
1950 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1951 fprintf(stderr, "cpuid_data is full, no space for "
1952 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1953 abort();
1954 }
1955 c = &cpuid_data.entries[cpuid_i++];
1956 }
1957 break;
1958 case 0x12:
1959 for (j = 0; ; j++) {
1960 c->function = i;
1961 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1962 c->index = j;
1963 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1964
1965 if (j > 1 && (c->eax & 0xf) != 1) {
1966 break;
1967 }
1968
1969 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1970 fprintf(stderr, "cpuid_data is full, no space for "
1971 "cpuid(eax:0x12,ecx:0x%x)\n", j);
1972 abort();
1973 }
1974 c = &cpuid_data.entries[cpuid_i++];
1975 }
1976 break;
1977 case 0x7:
1978 case 0x14:
1979 case 0x1d:
1980 case 0x1e: {
1981 uint32_t times;
1982
1983 c->function = i;
1984 c->index = 0;
1985 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1986 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1987 times = c->eax;
1988
1989 for (j = 1; j <= times; ++j) {
1990 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1991 fprintf(stderr, "cpuid_data is full, no space for "
1992 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1993 abort();
1994 }
1995 c = &cpuid_data.entries[cpuid_i++];
1996 c->function = i;
1997 c->index = j;
1998 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1999 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
2000 }
2001 break;
2002 }
2003 default:
2004 c->function = i;
2005 c->flags = 0;
2006 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2007 if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2008 /*
2009 * KVM already returns all zeroes if a CPUID entry is missing,
2010 * so we can omit it and avoid hitting KVM's 80-entry limit.
2011 */
2012 cpuid_i--;
2013 }
2014 break;
2015 }
2016 }
2017
2018 if (limit >= 0x0a) {
2019 uint32_t eax, edx;
2020
2021 cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
2022
2023 has_architectural_pmu_version = eax & 0xff;
2024 if (has_architectural_pmu_version > 0) {
2025 num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
2026
2027 /* Shouldn't be more than 32, since that's the number of bits
2028 * available in EBX to tell us _which_ counters are available.
2029 * Play it safe.
2030 */
2031 if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
2032 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
2033 }
2034
2035 if (has_architectural_pmu_version > 1) {
2036 num_architectural_pmu_fixed_counters = edx & 0x1f;
2037
2038 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
2039 num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
2040 }
2041 }
2042 }
2043 }
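/*
 * Worked example of the decoding above (hypothetical values): with
 * CPUID.0xA eax = 0x00000504 and edx = 0x00000603, the PMU is
 * version 4 with 5 general-purpose counters and, since the version
 * is > 1, edx & 0x1f gives 3 fixed-function counters.
 */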
2044
2045 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
2046
2047 for (i = 0x80000000; i <= limit; i++) {
2048 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2049 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
2050 abort();
2051 }
2052 c = &cpuid_data.entries[cpuid_i++];
2053
2054 switch (i) {
2055 case 0x8000001d:
2056 /* Query for all AMD cache information leaves */
2057 for (j = 0; ; j++) {
2058 c->function = i;
2059 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2060 c->index = j;
2061 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
2062
2063 if (c->eax == 0) {
2064 break;
2065 }
2066 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2067 fprintf(stderr, "cpuid_data is full, no space for "
2068 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
2069 abort();
2070 }
2071 c = &cpuid_data.entries[cpuid_i++];
2072 }
2073 break;
2074 default:
2075 c->function = i;
2076 c->flags = 0;
2077 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2078 if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2079 /*
2080 * KVM already returns all zeroes if a CPUID entry is missing,
2081 * so we can omit it and avoid hitting KVM's 80-entry limit.
2082 */
2083 cpuid_i--;
2084 }
2085 break;
2086 }
2087 }
2088
2089 /* Call Centaur's CPUID instructions if they are supported. */
2090 if (env->cpuid_xlevel2 > 0) {
2091 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
2092
2093 for (i = 0xC0000000; i <= limit; i++) {
2094 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2095 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
2096 abort();
2097 }
2098 c = &cpuid_data.entries[cpuid_i++];
2099
2100 c->function = i;
2101 c->flags = 0;
2102 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2103 }
2104 }
2105
2106 cpuid_data.cpuid.nent = cpuid_i;
2107
2108 if (((env->cpuid_version >> 8) & 0xF) >= 6
2109 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2110 (CPUID_MCE | CPUID_MCA)) {
2111 uint64_t mcg_cap, unsupported_caps;
2112 int banks;
2113 int ret;
2114
2115 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
2116 if (ret < 0) {
2117 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
2118 return ret;
2119 }
2120
2121 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
2122 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2123 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
2124 return -ENOTSUP;
2125 }
2126
2127 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
2128 if (unsupported_caps) {
2129 if (unsupported_caps & MCG_LMCE_P) {
2130 error_report("kvm: LMCE not supported");
2131 return -ENOTSUP;
2132 }
2133 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
2134 unsupported_caps);
2135 }
2136
2137 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
2138 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
2139 if (ret < 0) {
2140 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
2141 return ret;
2142 }
2143 }
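/*
 * Note on the masking above: the low byte of mcg_cap
 * (MCG_CAP_BANKS_MASK) holds the bank count, e.g. 0x0a for ten
 * banks, while the higher bits are capability flags. Only the flags
 * are trimmed to what the host kernel supports; the bank count is
 * validated separately just before.
 */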
2144
2145 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
2146
2147 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
2148 if (c) {
2149 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
2150 !!(c->ecx & CPUID_EXT_SMX);
2151 }
2152
2153 c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
2154 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
2155 has_msr_feature_control = true;
2156 }
2157
2158 if (env->mcg_cap & MCG_LMCE_P) {
2159 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
2160 }
2161
2162 if (!env->user_tsc_khz) {
2163 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
2164 invtsc_mig_blocker == NULL) {
2165 error_setg(&invtsc_mig_blocker,
2166 "State blocked by non-migratable CPU device"
2167 " (invtsc flag)");
2168 r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
2169 if (r < 0) {
2170 error_report_err(local_err);
2171 return r;
2172 }
2173 }
2174 }
2175
2176 if (cpu->vmware_cpuid_freq
2177 /* Guests depend on leaf 0x40000000 to detect this feature, so only expose
2178 * it if KVM exposes leaf 0x40000000; Hyper-V conflicts by claiming it. */
2179 && cpu->expose_kvm
2180 && kvm_base == KVM_CPUID_SIGNATURE
2181 /* TSC clock must be stable and known for this feature. */
2182 && tsc_is_stable_and_known(env)) {
2183
2184 c = &cpuid_data.entries[cpuid_i++];
2185 c->function = KVM_CPUID_SIGNATURE | 0x10;
2186 c->eax = env->tsc_khz;
2187 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
2188 c->ecx = c->edx = 0;
2189
2190 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
2191 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
2192 }
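/*
 * Example: with a 2.4 GHz TSC this publishes leaf 0x40000010 with
 * eax = 2400000 (TSC kHz) and ebx = 1000000 (the fixed 1 GHz APIC
 * bus frequency expressed in kHz), the VMware-compatible encoding.
 */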
2193
2194 cpuid_data.cpuid.nent = cpuid_i;
2195
2196 cpuid_data.cpuid.padding = 0;
2197 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
2198 if (r) {
2199 goto fail;
2200 }
2201 kvm_init_xsave(env);
2202
2203 max_nested_state_len = kvm_max_nested_state_length();
2204 if (max_nested_state_len > 0) {
2205 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
2206
2207 if (cpu_has_vmx(env) || cpu_has_svm(env)) {
2208 env->nested_state = g_malloc0(max_nested_state_len);
2209 env->nested_state->size = max_nested_state_len;
2210
2211 kvm_init_nested_state(env);
2212 }
2213 }
2214
2215 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
2216
2217 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
2218 has_msr_tsc_aux = false;
2219 }
2220
2221 kvm_init_msrs(cpu);
2222
2223 return 0;
2224
2225 fail:
2226 migrate_del_blocker(&invtsc_mig_blocker);
2227
2228 return r;
2229 }
2230
2231 int kvm_arch_destroy_vcpu(CPUState *cs)
2232 {
2233 X86CPU *cpu = X86_CPU(cs);
2234 CPUX86State *env = &cpu->env;
2235
2236 g_free(env->xsave_buf);
2237
2238 g_free(cpu->kvm_msr_buf);
2239 cpu->kvm_msr_buf = NULL;
2240
2241 g_free(env->nested_state);
2242 env->nested_state = NULL;
2243
2244 qemu_del_vm_change_state_handler(cpu->vmsentry);
2245
2246 return 0;
2247 }
2248
2249 void kvm_arch_reset_vcpu(X86CPU *cpu)
2250 {
2251 CPUX86State *env = &cpu->env;
2252
2253 env->xcr0 = 1;
2254 if (kvm_irqchip_in_kernel()) {
2255 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
2256 KVM_MP_STATE_UNINITIALIZED;
2257 } else {
2258 env->mp_state = KVM_MP_STATE_RUNNABLE;
2259 }
2260
2261 /* enabled by default */
2262 env->poll_control_msr = 1;
2263
2264 kvm_init_nested_state(env);
2265
2266 sev_es_set_reset_vector(CPU(cpu));
2267 }
2268
2269 void kvm_arch_after_reset_vcpu(X86CPU *cpu)
2270 {
2271 CPUX86State *env = &cpu->env;
2272 int i;
2273
2274 /*
2275 * Reset SynIC after all other devices have been reset to let them remove
2276 * their SINT routes first.
2277 */
2278 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2279 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
2280 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
2281 }
2282
2283 hyperv_x86_synic_reset(cpu);
2284 }
2285 }
2286
2287 void kvm_arch_do_init_vcpu(X86CPU *cpu)
2288 {
2289 CPUX86State *env = &cpu->env;
2290
2291 /* APs get directly into wait-for-SIPI state. */
2292 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
2293 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
2294 }
2295 }
2296
2297 static int kvm_get_supported_feature_msrs(KVMState *s)
2298 {
2299 int ret = 0;
2300
2301 if (kvm_feature_msrs != NULL) {
2302 return 0;
2303 }
2304
2305 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
2306 return 0;
2307 }
2308
2309 struct kvm_msr_list msr_list;
2310
2311 msr_list.nmsrs = 0;
2312 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
2313 if (ret < 0 && ret != -E2BIG) {
2314 error_report("Fetch KVM feature MSR list failed: %s",
2315 strerror(-ret));
2316 return ret;
2317 }
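/*
 * The first KVM_GET_MSR_FEATURE_INDEX_LIST call, made with nmsrs == 0,
 * is expected to fail with -E2BIG; as a side effect the kernel writes
 * the required count into msr_list.nmsrs, which sizes the real query
 * below.
 */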
2318
2319 assert(msr_list.nmsrs > 0);
2320 kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
2321 msr_list.nmsrs * sizeof(msr_list.indices[0]));
2322
2323 kvm_feature_msrs->nmsrs = msr_list.nmsrs;
2324 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
2325
2326 if (ret < 0) {
2327 error_report("Fetch KVM feature MSR list failed: %s",
2328 strerror(-ret));
2329 g_free(kvm_feature_msrs);
2330 kvm_feature_msrs = NULL;
2331 return ret;
2332 }
2333
2334 return 0;
2335 }
2336
2337 static int kvm_get_supported_msrs(KVMState *s)
2338 {
2339 int ret = 0;
2340 struct kvm_msr_list msr_list, *kvm_msr_list;
2341
2342 /*
2343 * Obtain MSR list from KVM. These are the MSRs that we must
2344 * save/restore.
2345 */
2346 msr_list.nmsrs = 0;
2347 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
2348 if (ret < 0 && ret != -E2BIG) {
2349 return ret;
2350 }
2351 /*
2352 * Old kernel modules had a bug and could write beyond the provided
2353 * memory. Allocate at least 1K to be safe.
2354 */
2355 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
2356 msr_list.nmsrs *
2357 sizeof(msr_list.indices[0])));
2358
2359 kvm_msr_list->nmsrs = msr_list.nmsrs;
2360 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
2361 if (ret >= 0) {
2362 int i;
2363
2364 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
2365 switch (kvm_msr_list->indices[i]) {
2366 case MSR_STAR:
2367 has_msr_star = true;
2368 break;
2369 case MSR_VM_HSAVE_PA:
2370 has_msr_hsave_pa = true;
2371 break;
2372 case MSR_TSC_AUX:
2373 has_msr_tsc_aux = true;
2374 break;
2375 case MSR_TSC_ADJUST:
2376 has_msr_tsc_adjust = true;
2377 break;
2378 case MSR_IA32_TSCDEADLINE:
2379 has_msr_tsc_deadline = true;
2380 break;
2381 case MSR_IA32_SMBASE:
2382 has_msr_smbase = true;
2383 break;
2384 case MSR_SMI_COUNT:
2385 has_msr_smi_count = true;
2386 break;
2387 case MSR_IA32_MISC_ENABLE:
2388 has_msr_misc_enable = true;
2389 break;
2390 case MSR_IA32_BNDCFGS:
2391 has_msr_bndcfgs = true;
2392 break;
2393 case MSR_IA32_XSS:
2394 has_msr_xss = true;
2395 break;
2396 case MSR_IA32_UMWAIT_CONTROL:
2397 has_msr_umwait = true;
2398 break;
2399 case HV_X64_MSR_CRASH_CTL:
2400 has_msr_hv_crash = true;
2401 break;
2402 case HV_X64_MSR_RESET:
2403 has_msr_hv_reset = true;
2404 break;
2405 case HV_X64_MSR_VP_INDEX:
2406 has_msr_hv_vpindex = true;
2407 break;
2408 case HV_X64_MSR_VP_RUNTIME:
2409 has_msr_hv_runtime = true;
2410 break;
2411 case HV_X64_MSR_SCONTROL:
2412 has_msr_hv_synic = true;
2413 break;
2414 case HV_X64_MSR_STIMER0_CONFIG:
2415 has_msr_hv_stimer = true;
2416 break;
2417 case HV_X64_MSR_TSC_FREQUENCY:
2418 has_msr_hv_frequencies = true;
2419 break;
2420 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2421 has_msr_hv_reenlightenment = true;
2422 break;
2423 case HV_X64_MSR_SYNDBG_OPTIONS:
2424 has_msr_hv_syndbg_options = true;
2425 break;
2426 case MSR_IA32_SPEC_CTRL:
2427 has_msr_spec_ctrl = true;
2428 break;
2429 case MSR_AMD64_TSC_RATIO:
2430 has_tsc_scale_msr = true;
2431 break;
2432 case MSR_IA32_TSX_CTRL:
2433 has_msr_tsx_ctrl = true;
2434 break;
2435 case MSR_VIRT_SSBD:
2436 has_msr_virt_ssbd = true;
2437 break;
2438 case MSR_IA32_ARCH_CAPABILITIES:
2439 has_msr_arch_capabs = true;
2440 break;
2441 case MSR_IA32_CORE_CAPABILITY:
2442 has_msr_core_capabs = true;
2443 break;
2444 case MSR_IA32_PERF_CAPABILITIES:
2445 has_msr_perf_capabs = true;
2446 break;
2447 case MSR_IA32_VMX_VMFUNC:
2448 has_msr_vmx_vmfunc = true;
2449 break;
2450 case MSR_IA32_UCODE_REV:
2451 has_msr_ucode_rev = true;
2452 break;
2453 case MSR_IA32_VMX_PROCBASED_CTLS2:
2454 has_msr_vmx_procbased_ctls2 = true;
2455 break;
2456 case MSR_IA32_PKRS:
2457 has_msr_pkrs = true;
2458 break;
2459 }
2460 }
2461 }
2462
2463 g_free(kvm_msr_list);
2464
2465 return ret;
2466 }
2467
2468 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
2469 uint64_t *val)
2470 {
2471 CPUState *cs = CPU(cpu);
2472
2473 *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
2474 *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
2475
2476 return true;
2477 }
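/*
 * Worked example: a guest topology of 8 cores with 2 threads each
 * yields *val = (8 << 16) | 16 = 0x00080010, matching the layout of
 * the real MSR_CORE_THREAD_COUNT on Intel parts.
 */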
2478
2479 static Notifier smram_machine_done;
2480 static KVMMemoryListener smram_listener;
2481 static AddressSpace smram_address_space;
2482 static MemoryRegion smram_as_root;
2483 static MemoryRegion smram_as_mem;
2484
2485 static void register_smram_listener(Notifier *n, void *unused)
2486 {
2487 MemoryRegion *smram =
2488 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2489
2490 /* Outer container... */
2491 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2492 memory_region_set_enabled(&smram_as_root, true);
2493
2494 /* ... with two regions inside: normal system memory with low
2495 * priority, and...
2496 */
2497 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2498 get_system_memory(), 0, ~0ull);
2499 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2500 memory_region_set_enabled(&smram_as_mem, true);
2501
2502 if (smram) {
2503 /* ... SMRAM with higher priority */
2504 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2505 memory_region_set_enabled(smram, true);
2506 }
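/*
 * The resulting "KVM-SMRAM" address space is, conceptually, system
 * memory with the SMRAM region stacked on top at priority 10, so SMM
 * accesses see SMRAM where it is mapped and ordinary RAM everywhere
 * else.
 */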
2507
2508 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2509 kvm_memory_listener_register(kvm_state, &smram_listener,
2510 &smram_address_space, 1, "kvm-smram");
2511 }
2512
2513 int kvm_arch_get_default_type(MachineState *ms)
2514 {
2515 return 0;
2516 }
2517
2518 int kvm_arch_init(MachineState *ms, KVMState *s)
2519 {
2520 uint64_t identity_base = 0xfffbc000;
2521 uint64_t shadow_mem;
2522 int ret;
2523 struct utsname utsname;
2524 Error *local_err = NULL;
2525
2526 /*
2527 * Initialize SEV context, if required
2528 *
2529 * If no memory encryption is requested (ms->cgs == NULL) this is
2530 * a no-op.
2531 *
2532 * It's also a no-op if a non-SEV confidential guest support
2533 * mechanism is selected. SEV is the only mechanism available to
2534 * select on x86 at present, so this doesn't arise, but if new
2535 * mechanisms are supported in future (e.g. TDX), they'll need
2536 * their own initialization either here or elsewhere.
2537 */
2538 ret = sev_kvm_init(ms->cgs, &local_err);
2539 if (ret < 0) {
2540 error_report_err(local_err);
2541 return ret;
2542 }
2543
2544 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2545 has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
2546
2547 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2548
2549 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2550 if (has_exception_payload) {
2551 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2552 if (ret < 0) {
2553 error_report("kvm: Failed to enable exception payload cap: %s",
2554 strerror(-ret));
2555 return ret;
2556 }
2557 }
2558
2559 has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
2560 if (has_triple_fault_event) {
2561 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
2562 if (ret < 0) {
2563 error_report("kvm: Failed to enable triple fault event cap: %s",
2564 strerror(-ret));
2565 return ret;
2566 }
2567 }
2568
2569 if (s->xen_version) {
2570 #ifdef CONFIG_XEN_EMU
2571 if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) {
2572 error_report("kvm: Xen support only available in PC machine");
2573 return -ENOTSUP;
2574 }
2575 /* hyperv_enabled() can't be checked this early; the per-vCPU init re-runs kvm_xen_init() with the Hyper-V MSR if needed. */
2576 uint32_t msr = XEN_HYPERCALL_MSR;
2577 ret = kvm_xen_init(s, msr);
2578 if (ret < 0) {
2579 return ret;
2580 }
2581 #else
2582 error_report("kvm: Xen support not enabled in qemu");
2583 return -ENOTSUP;
2584 #endif
2585 }
2586
2587 ret = kvm_get_supported_msrs(s);
2588 if (ret < 0) {
2589 return ret;
2590 }
2591
2592 kvm_get_supported_feature_msrs(s);
2593
2594 uname(&utsname);
2595 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2596
2597 /*
2598 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2599 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2600 * Since these must be part of guest physical memory, we need to allocate
2601 * them, both by setting their start addresses in the kernel and by
2602 * creating a corresponding e820 entry. We need 4 pages before the BIOS,
2603 * so this value allows up to 16M BIOSes.
2604 */
2605 identity_base = 0xfeffc000;
2606 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2607 if (ret < 0) {
2608 return ret;
2609 }
2610
2611 /* Set TSS base one page after EPT identity map. */
2612 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2613 if (ret < 0) {
2614 return ret;
2615 }
2616
2617 /* Tell fw_cfg to notify the BIOS to reserve the range. */
2618 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2619 if (ret < 0) {
2620 fprintf(stderr, "e820_add_entry() table is full\n");
2621 return ret;
2622 }
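/*
 * Resulting layout: the EPT identity-map page sits at 0xfeffc000, the
 * three TSS pages occupy 0xfeffd000..0xfeffffff, and the e820
 * reservation above covers the whole 16 KiB range.
 */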
2623
2624 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2625 if (shadow_mem != -1) {
2626 shadow_mem /= 4096;
2627 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2628 if (ret < 0) {
2629 return ret;
2630 }
2631 }
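/*
 * Example: "-accel kvm,kvm-shadow-mem=16777216" arrives here as
 * 16777216 bytes and is converted to 4096 MMU pages before being
 * passed to KVM_SET_NR_MMU_PAGES. (Command line shown for
 * illustration; the property is read via QOM above.)
 */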
2632
2633 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2634 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2635 x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2636 smram_machine_done.notify = register_smram_listener;
2637 qemu_add_machine_init_done_notifier(&smram_machine_done);
2638 }
2639
2640 if (enable_cpu_pm) {
2641 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2642 /* Workaround for a kernel header with a typo. TODO: fix header and drop. */
2643 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2644 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2645 #endif
2646 if (disable_exits) {
2647 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2648 KVM_X86_DISABLE_EXITS_HLT |
2649 KVM_X86_DISABLE_EXITS_PAUSE |
2650 KVM_X86_DISABLE_EXITS_CSTATE);
2651 }
2652
2653 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2654 disable_exits);
2655 if (ret < 0) {
2656 error_report("kvm: guest stopping CPU not supported: %s",
2657 strerror(-ret));
2658 }
2659 }
2660
2661 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
2662 X86MachineState *x86ms = X86_MACHINE(ms);
2663
2664 if (x86ms->bus_lock_ratelimit > 0) {
2665 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
2666 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
2667 error_report("kvm: bus lock detection unsupported");
2668 return -ENOTSUP;
2669 }
2670 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
2671 KVM_BUS_LOCK_DETECTION_EXIT);
2672 if (ret < 0) {
2673 error_report("kvm: Failed to enable bus lock detection cap: %s",
2674 strerror(-ret));
2675 return ret;
2676 }
2677 ratelimit_init(&bus_lock_ratelimit_ctrl);
2678 ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
2679 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
2680 }
2681 }
2682
2683 if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
2684 kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
2685 uint64_t notify_window_flags =
2686 ((uint64_t)s->notify_window << 32) |
2687 KVM_X86_NOTIFY_VMEXIT_ENABLED |
2688 KVM_X86_NOTIFY_VMEXIT_USER;
2689 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
2690 notify_window_flags);
2691 if (ret < 0) {
2692 error_report("kvm: Failed to enable notify vmexit cap: %s",
2693 strerror(-ret));
2694 return ret;
2695 }
2696 }
2697 if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
2698 bool r;
2699
2700 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
2701 KVM_MSR_EXIT_REASON_FILTER);
2702 if (ret) {
2703 error_report("Could not enable user space MSRs: %s",
2704 strerror(-ret));
2705 exit(1);
2706 }
2707
2708 r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
2709 kvm_rdmsr_core_thread_count, NULL);
2710 if (!r) {
2711 error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
2712 strerror(-ret));
2713 exit(1);
2714 }
2715 }
2716
2717 return 0;
2718 }
2719
2720 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2721 {
2722 lhs->selector = rhs->selector;
2723 lhs->base = rhs->base;
2724 lhs->limit = rhs->limit;
2725 lhs->type = 3;
2726 lhs->present = 1;
2727 lhs->dpl = 3;
2728 lhs->db = 0;
2729 lhs->s = 1;
2730 lhs->l = 0;
2731 lhs->g = 0;
2732 lhs->avl = 0;
2733 lhs->unusable = 0;
2734 }
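/*
 * In virtual-8086 mode the CPU treats every segment as a present,
 * DPL-3, read/write data segment (type 3), so only selector, base and
 * limit are taken from QEMU's segment cache; all other attributes are
 * hardwired above.
 */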
2735
2736 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2737 {
2738 unsigned flags = rhs->flags;
2739 lhs->selector = rhs->selector;
2740 lhs->base = rhs->base;
2741 lhs->limit = rhs->limit;
2742 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2743 lhs->present = (flags & DESC_P_MASK) != 0;
2744 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2745 lhs->db = (flags >> DESC_B_SHIFT) & 1;
2746 lhs->s = (flags & DESC_S_MASK) != 0;
2747 lhs->l = (flags >> DESC_L_SHIFT) & 1;
2748 lhs->g = (flags & DESC_G_MASK) != 0;
2749 lhs->avl = (flags & DESC_AVL_MASK) != 0;
2750 lhs->unusable = !lhs->present;
2751 lhs->padding = 0;
2752 }
2753
2754 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2755 {
2756 lhs->selector = rhs->selector;
2757 lhs->base = rhs->base;
2758 lhs->limit = rhs->limit;
2759 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2760 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2761 (rhs->dpl << DESC_DPL_SHIFT) |
2762 (rhs->db << DESC_B_SHIFT) |
2763 (rhs->s * DESC_S_MASK) |
2764 (rhs->l << DESC_L_SHIFT) |
2765 (rhs->g * DESC_G_MASK) |
2766 (rhs->avl * DESC_AVL_MASK);
2767 }
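/*
 * Worked example of the flag packing: a flat 32-bit ring-0 code
 * segment (type 0xb, s=1, dpl=0, present, db=1, g=1) round-trips
 * through get_seg() as flags == 0x00c09b00, i.e. type << 8, the S
 * bit at 12, P at 15, DB at 22 and G at 23.
 */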
2768
2769 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2770 {
2771 if (set) {
2772 *kvm_reg = *qemu_reg;
2773 } else {
2774 *qemu_reg = *kvm_reg;
2775 }
2776 }
2777
2778 static int kvm_getput_regs(X86CPU *cpu, int set)
2779 {
2780 CPUX86State *env = &cpu->env;
2781 struct kvm_regs regs;
2782 int ret = 0;
2783
2784 if (!set) {
2785 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2786 if (ret < 0) {
2787 return ret;
2788 }
2789 }
2790
2791 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2792 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2793 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2794 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2795 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2796 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2797 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2798 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2799 #ifdef TARGET_X86_64
2800 kvm_getput_reg(&regs.r8, &env->regs[8], set);
2801 kvm_getput_reg(&regs.r9, &env->regs[9], set);
2802 kvm_getput_reg(&regs.r10, &env->regs[10], set);
2803 kvm_getput_reg(&regs.r11, &env->regs[11], set);
2804 kvm_getput_reg(&regs.r12, &env->regs[12], set);
2805 kvm_getput_reg(&regs.r13, &env->regs[13], set);
2806 kvm_getput_reg(&regs.r14, &env->regs[14], set);
2807 kvm_getput_reg(&regs.r15, &env->regs[15], set);
2808 #endif
2809
2810 kvm_getput_reg(&regs.rflags, &env->eflags, set);
2811 kvm_getput_reg(&regs.rip, &env->eip, set);
2812
2813 if (set) {
2814 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2815 }
2816
2817 return ret;
2818 }
2819
2820 static int kvm_put_xsave(X86CPU *cpu)
2821 {
2822 CPUX86State *env = &cpu->env;
2823 void *xsave = env->xsave_buf;
2824
2825 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
2826
2827 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2828 }
2829
2830 static int kvm_put_xcrs(X86CPU *cpu)
2831 {
2832 CPUX86State *env = &cpu->env;
2833 struct kvm_xcrs xcrs = {};
2834
2835 if (!has_xcrs) {
2836 return 0;
2837 }
2838
2839 xcrs.nr_xcrs = 1;
2840 xcrs.flags = 0;
2841 xcrs.xcrs[0].xcr = 0;
2842 xcrs.xcrs[0].value = env->xcr0;
2843 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2844 }
2845
2846 static int kvm_put_sregs(X86CPU *cpu)
2847 {
2848 CPUX86State *env = &cpu->env;
2849 struct kvm_sregs sregs;
2850
2851 /*
2852 * The interrupt_bitmap is ignored because KVM_SET_SREGS is
2853 * always followed by KVM_SET_VCPU_EVENTS.
2854 */
2855 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2856
2857 if ((env->eflags & VM_MASK)) {
2858 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2859 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2860 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2861 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2862 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2863 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2864 } else {
2865 set_seg(&sregs.cs, &env->segs[R_CS]);
2866 set_seg(&sregs.ds, &env->segs[R_DS]);
2867 set_seg(&sregs.es, &env->segs[R_ES]);
2868 set_seg(&sregs.fs, &env->segs[R_FS]);
2869 set_seg(&sregs.gs, &env->segs[R_GS]);
2870 set_seg(&sregs.ss, &env->segs[R_SS]);
2871 }
2872
2873 set_seg(&sregs.tr, &env->tr);
2874 set_seg(&sregs.ldt, &env->ldt);
2875
2876 sregs.idt.limit = env->idt.limit;
2877 sregs.idt.base = env->idt.base;
2878 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2879 sregs.gdt.limit = env->gdt.limit;
2880 sregs.gdt.base = env->gdt.base;
2881 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2882
2883 sregs.cr0 = env->cr[0];
2884 sregs.cr2 = env->cr[2];
2885 sregs.cr3 = env->cr[3];
2886 sregs.cr4 = env->cr[4];
2887
2888 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2889 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2890
2891 sregs.efer = env->efer;
2892
2893 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2894 }
2895
2896 static int kvm_put_sregs2(X86CPU *cpu)
2897 {
2898 CPUX86State *env = &cpu->env;
2899 struct kvm_sregs2 sregs;
2900 int i;
2901
2902 sregs.flags = 0;
2903
2904 if ((env->eflags & VM_MASK)) {
2905 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2906 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2907 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2908 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2909 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2910 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2911 } else {
2912 set_seg(&sregs.cs, &env->segs[R_CS]);
2913 set_seg(&sregs.ds, &env->segs[R_DS]);
2914 set_seg(&sregs.es, &env->segs[R_ES]);
2915 set_seg(&sregs.fs, &env->segs[R_FS]);
2916 set_seg(&sregs.gs, &env->segs[R_GS]);
2917 set_seg(&sregs.ss, &env->segs[R_SS]);
2918 }
2919
2920 set_seg(&sregs.tr, &env->tr);
2921 set_seg(&sregs.ldt, &env->ldt);
2922
2923 sregs.idt.limit = env->idt.limit;
2924 sregs.idt.base = env->idt.base;
2925 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2926 sregs.gdt.limit = env->gdt.limit;
2927 sregs.gdt.base = env->gdt.base;
2928 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2929
2930 sregs.cr0 = env->cr[0];
2931 sregs.cr2 = env->cr[2];
2932 sregs.cr3 = env->cr[3];
2933 sregs.cr4 = env->cr[4];
2934
2935 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2936 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2937
2938 sregs.efer = env->efer;
2939
2940 if (env->pdptrs_valid) {
2941 for (i = 0; i < 4; i++) {
2942 sregs.pdptrs[i] = env->pdptrs[i];
2943 }
2944 sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
2945 }
2946
2947 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
2948 }
2949
2950
2951 static void kvm_msr_buf_reset(X86CPU *cpu)
2952 {
2953 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2954 }
2955
2956 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2957 {
2958 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2959 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2960 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2961
2962 assert((void *)(entry + 1) <= limit);
2963
2964 entry->index = index;
2965 entry->reserved = 0;
2966 entry->data = value;
2967 msrs->nmsrs++;
2968 }
2969
2970 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2971 {
2972 kvm_msr_buf_reset(cpu);
2973 kvm_msr_entry_add(cpu, index, value);
2974
2975 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2976 }
2977
2978 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
2979 {
2980 int ret;
2981 struct {
2982 struct kvm_msrs info;
2983 struct kvm_msr_entry entries[1];
2984 } msr_data = {
2985 .info.nmsrs = 1,
2986 .entries[0].index = index,
2987 };
2988
2989 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
2990 if (ret < 0) {
2991 return ret;
2992 }
2993 assert(ret == 1);
2994 *value = msr_data.entries[0].data;
2995 return ret;
2996 }
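/*
 * On success KVM_GET_MSRS returns the number of MSRs actually read,
 * so this function returns 1; callers use that to assert the read
 * really happened.
 */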
2997 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2998 {
2999 int ret;
3000
3001 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
3002 assert(ret == 1);
3003 }
3004
3005 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
3006 {
3007 CPUX86State *env = &cpu->env;
3008 int ret;
3009
3010 if (!has_msr_tsc_deadline) {
3011 return 0;
3012 }
3013
3014 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
3015 if (ret < 0) {
3016 return ret;
3017 }
3018
3019 assert(ret == 1);
3020 return 0;
3021 }
3022
3023 /*
3024 * Provide a separate write service for the feature control MSR in order to
3025 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
3026 * before writing any other state because forcibly leaving nested mode
3027 * invalidates the VCPU state.
3028 */
3029 static int kvm_put_msr_feature_control(X86CPU *cpu)
3030 {
3031 int ret;
3032
3033 if (!has_msr_feature_control) {
3034 return 0;
3035 }
3036
3037 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
3038 cpu->env.msr_ia32_feature_control);
3039 if (ret < 0) {
3040 return ret;
3041 }
3042
3043 assert(ret == 1);
3044 return 0;
3045 }
3046
3047 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
3048 {
3049 uint32_t default1, can_be_one, can_be_zero;
3050 uint32_t must_be_one;
3051
3052 switch (index) {
3053 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3054 default1 = 0x00000016;
3055 break;
3056 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3057 default1 = 0x0401e172;
3058 break;
3059 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3060 default1 = 0x000011ff;
3061 break;
3062 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3063 default1 = 0x00036dff;
3064 break;
3065 case MSR_IA32_VMX_PROCBASED_CTLS2:
3066 default1 = 0;
3067 break;
3068 default:
3069 abort();
3070 }
3071
3072 /* If a feature bit is set, the control can be either set or clear.
3073 * Otherwise the control is fixed to the corresponding bit of default1.
3074 */
3075 can_be_one = features | default1;
3076 can_be_zero = features | ~default1;
3077 must_be_one = ~can_be_zero;
3078
3079 /*
3080 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
3081 * Bit 32:63 -> 1 if the control bit can be one.
3082 */
3083 return must_be_one | (((uint64_t)can_be_one) << 32);
3084 }
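/*
 * Worked example (hypothetical feature word): for
 * MSR_IA32_VMX_TRUE_PINBASED_CTLS, default1 is 0x16. With features =
 * 0x28, must_be_one = ~0x28 & 0x16 = 0x16 and can_be_one = 0x3e, so
 * the function returns 0x0000003e00000016: bits that may be 1 in the
 * high word, bits that must be 1 in the low word.
 */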
3085
3086 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
3087 {
3088 uint64_t kvm_vmx_basic =
3089 kvm_arch_get_supported_msr_feature(kvm_state,
3090 MSR_IA32_VMX_BASIC);
3091
3092 if (!kvm_vmx_basic) {
3093 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
3094 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
3095 */
3096 return;
3097 }
3098
3099 uint64_t kvm_vmx_misc =
3100 kvm_arch_get_supported_msr_feature(kvm_state,
3101 MSR_IA32_VMX_MISC);
3102 uint64_t kvm_vmx_ept_vpid =
3103 kvm_arch_get_supported_msr_feature(kvm_state,
3104 MSR_IA32_VMX_EPT_VPID_CAP);
3105
3106 /*
3107 * If the guest is 64-bit, a value of 1 is allowed for the host address
3108 * space size vmexit control.
3109 */
3110 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
3111 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
3112
3113 /*
3114 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
3115 * not change them for backwards compatibility.
3116 */
3117 uint64_t fixed_vmx_basic = kvm_vmx_basic &
3118 (MSR_VMX_BASIC_VMCS_REVISION_MASK |
3119 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
3120 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
3121
3122 /*
3123 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
3124 * change in the future but are always zero for now, clear them to be
3125 * future proof. Bits 32-63 in theory could change, though KVM does
3126 * not support dual-monitor treatment and probably never will; mask
3127 * them out as well.
3128 */
3129 uint64_t fixed_vmx_misc = kvm_vmx_misc &
3130 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
3131 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
3132
3133 /*
3134 * EPT memory types should not change either, so we do not bother
3135 * adding features for them.
3136 */
3137 uint64_t fixed_vmx_ept_mask =
3138 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
3139 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
3140 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
3141
3142 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3143 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3144 f[FEAT_VMX_PROCBASED_CTLS]));
3145 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3146 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3147 f[FEAT_VMX_PINBASED_CTLS]));
3148 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3149 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
3150 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
3151 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3152 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3153 f[FEAT_VMX_ENTRY_CTLS]));
3154 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
3155 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
3156 f[FEAT_VMX_SECONDARY_CTLS]));
3157 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
3158 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
3159 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
3160 f[FEAT_VMX_BASIC] | fixed_vmx_basic);
3161 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
3162 f[FEAT_VMX_MISC] | fixed_vmx_misc);
3163 if (has_msr_vmx_vmfunc) {
3164 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
3165 }
3166
3167 /*
3168 * Just to be safe, write these with constant values. The CRn_FIXED1
3169 * MSRs are generated by KVM based on the vCPU's CPUID.
3170 */
3171 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
3172 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
3173 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
3174 CR4_VMXE_MASK);
3175
3176 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
3177 /* TSC multiplier (0x2032). */
3178 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
3179 } else {
3180 /* Preemption timer (0x482E). */
3181 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
3182 }
3183 }
3184
3185 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
3186 {
3187 uint64_t kvm_perf_cap =
3188 kvm_arch_get_supported_msr_feature(kvm_state,
3189 MSR_IA32_PERF_CAPABILITIES);
3190
3191 if (kvm_perf_cap) {
3192 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
3193 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
3194 }
3195 }
3196
3197 static int kvm_buf_set_msrs(X86CPU *cpu)
3198 {
3199 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
3200 if (ret < 0) {
3201 return ret;
3202 }
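/*
 * KVM_SET_MSRS returns the number of MSRs successfully written and
 * stops at the first failure, so a short count identifies the
 * offending entry below.
 */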
3203
3204 if (ret < cpu->kvm_msr_buf->nmsrs) {
3205 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3206 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
3207 (uint32_t)e->index, (uint64_t)e->data);
3208 }
3209
3210 assert(ret == cpu->kvm_msr_buf->nmsrs);
3211 return 0;
3212 }
3213
3214 static void kvm_init_msrs(X86CPU *cpu)
3215 {
3216 CPUX86State *env = &cpu->env;
3217
3218 kvm_msr_buf_reset(cpu);
3219 if (has_msr_arch_capabs) {
3220 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
3221 env->features[FEAT_ARCH_CAPABILITIES]);
3222 }
3223
3224 if (has_msr_core_capabs) {
3225 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
3226 env->features[FEAT_CORE_CAPABILITY]);
3227 }
3228
3229 if (has_msr_perf_capabs && cpu->enable_pmu) {
3230 kvm_msr_entry_add_perf(cpu, env->features);
3231 }
3232
3233 if (has_msr_ucode_rev) {
3234 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
3235 }
3236
3237 /*
3238 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
3239 * all kernels with MSR features should have them.
3240 */
3241 if (kvm_feature_msrs && cpu_has_vmx(env)) {
3242 kvm_msr_entry_add_vmx(cpu, env->features);
3243 }
3244
3245 assert(kvm_buf_set_msrs(cpu) == 0);
3246 }
3247
3248 static int kvm_put_msrs(X86CPU *cpu, int level)
3249 {
3250 CPUX86State *env = &cpu->env;
3251 int i;
3252
3253 kvm_msr_buf_reset(cpu);
3254
3255 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
3256 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
3257 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
3258 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
3259 if (has_msr_star) {
3260 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
3261 }
3262 if (has_msr_hsave_pa) {
3263 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
3264 }
3265 if (has_msr_tsc_aux) {
3266 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
3267 }
3268 if (has_msr_tsc_adjust) {
3269 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
3270 }
3271 if (has_msr_misc_enable) {
3272 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
3273 env->msr_ia32_misc_enable);
3274 }
3275 if (has_msr_smbase) {
3276 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
3277 }
3278 if (has_msr_smi_count) {
3279 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
3280 }
3281 if (has_msr_pkrs) {
3282 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
3283 }
3284 if (has_msr_bndcfgs) {
3285 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
3286 }
3287 if (has_msr_xss) {
3288 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
3289 }
3290 if (has_msr_umwait) {
3291 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
3292 }
3293 if (has_msr_spec_ctrl) {
3294 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
3295 }
3296 if (has_tsc_scale_msr) {
3297 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
3298 }
3299
3300 if (has_msr_tsx_ctrl) {
3301 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
3302 }
3303 if (has_msr_virt_ssbd) {
3304 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
3305 }
3306
3307 #ifdef TARGET_X86_64
3308 if (lm_capable_kernel) {
3309 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
3310 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
3311 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
3312 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
3313 }
3314 #endif
3315
3316 /*
3317 * The following MSRs have side effects on the guest or are too heavy
3318 * for normal writeback. Limit them to reset or full state updates.
3319 */
3320 if (level >= KVM_PUT_RESET_STATE) {
3321 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
3322 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
3323 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
3324 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3325 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
3326 }
3327 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3328 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
3329 }
3330 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3331 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
3332 }
3333 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3334 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
3335 }
3336
3337 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3338 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
3339 }
3340
3341 if (has_architectural_pmu_version > 0) {
3342 if (has_architectural_pmu_version > 1) {
3343 /* Stop the counter. */
3344 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3345 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3346 }
3347
3348 /* Set the counter values. */
3349 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3350 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
3351 env->msr_fixed_counters[i]);
3352 }
3353 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3354 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
3355 env->msr_gp_counters[i]);
3356 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
3357 env->msr_gp_evtsel[i]);
3358 }
3359 if (has_architectural_pmu_version > 1) {
3360 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
3361 env->msr_global_status);
3362 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
3363 env->msr_global_ovf_ctrl);
3364
3365 /* Now start the PMU. */
3366 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
3367 env->msr_fixed_ctr_ctrl);
3368 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
3369 env->msr_global_ctrl);
3370 }
3371 }
3372 /*
3373 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
3374 * only sync them to KVM on the first CPU.
3375 */
3376 if (current_cpu == first_cpu) {
3377 if (has_msr_hv_hypercall) {
3378 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
3379 env->msr_hv_guest_os_id);
3380 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
3381 env->msr_hv_hypercall);
3382 }
3383 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3384 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
3385 env->msr_hv_tsc);
3386 }
3387 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3388 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
3389 env->msr_hv_reenlightenment_control);
3390 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
3391 env->msr_hv_tsc_emulation_control);
3392 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
3393 env->msr_hv_tsc_emulation_status);
3394 }
3395 #ifdef CONFIG_SYNDBG
3396 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
3397 has_msr_hv_syndbg_options) {
3398 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
3399 hyperv_syndbg_query_options());
3400 }
3401 #endif
3402 }
3403 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3404 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
3405 env->msr_hv_vapic);
3406 }
3407 if (has_msr_hv_crash) {
3408 int j;
3409
3410 for (j = 0; j < HV_CRASH_PARAMS; j++)
3411 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
3412 env->msr_hv_crash_params[j]);
3413
3414 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
3415 }
3416 if (has_msr_hv_runtime) {
3417 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
3418 }
3419 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
3420 && hv_vpindex_settable) {
3421 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
3422 hyperv_vp_index(CPU(cpu)));
3423 }
3424 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3425 int j;
3426
3427 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
3428
3429 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
3430 env->msr_hv_synic_control);
3431 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
3432 env->msr_hv_synic_evt_page);
3433 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
3434 env->msr_hv_synic_msg_page);
3435
3436 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
3437 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
3438 env->msr_hv_synic_sint[j]);
3439 }
3440 }
3441 if (has_msr_hv_stimer) {
3442 int j;
3443
3444 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
3445 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
3446 env->msr_hv_stimer_config[j]);
3447 }
3448
3449 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
3450 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
3451 env->msr_hv_stimer_count[j]);
3452 }
3453 }
3454 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3455 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
3456
3457 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
3458 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
3459 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
3460 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
3461 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
3462 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
3463 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
3464 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
3465 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
3466 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
3467 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
3468 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
3469 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3470 /* The CPU GPs if we write to a bit above the physical limit of
3471 * the host CPU (and KVM emulates that)
3472 */
3473 uint64_t mask = env->mtrr_var[i].mask;
3474 mask &= phys_mask;
3475
3476 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
3477 env->mtrr_var[i].base);
3478 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
3479 }
3480 }
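/*
 * Example of the trimming above: with cpu->phys_bits == 36,
 * phys_mask is 0xfffffffff, so a variable-MTRR mask of
 * 0xffffffff80000800 is cut down to 0xf80000800 before being handed
 * to KVM, as writing the higher bits would #GP on real hardware.
 */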
3481 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3482 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
3483 0x14, 1, R_EAX) & 0x7;
3484
3485 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
3486 env->msr_rtit_ctrl);
3487 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
3488 env->msr_rtit_status);
3489 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
3490 env->msr_rtit_output_base);
3491 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
3492 env->msr_rtit_output_mask);
3493 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
3494 env->msr_rtit_cr3_match);
3495 for (i = 0; i < addr_num; i++) {
3496 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
3497 env->msr_rtit_addrs[i]);
3498 }
3499 }
3500
3501 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3502 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
3503 env->msr_ia32_sgxlepubkeyhash[0]);
3504 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
3505 env->msr_ia32_sgxlepubkeyhash[1]);
3506 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
3507 env->msr_ia32_sgxlepubkeyhash[2]);
3508 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
3509 env->msr_ia32_sgxlepubkeyhash[3]);
3510 }
3511
3512 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3513 kvm_msr_entry_add(cpu, MSR_IA32_XFD,
3514 env->msr_xfd);
3515 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
3516 env->msr_xfd_err);
3517 }
3518
3519 if (kvm_enabled() && cpu->enable_pmu &&
3520 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3521 uint64_t depth;
3522 int ret;
3523
3524 /*
3525 * Only migrate Arch LBR state when the host's Arch LBR depth
3526 * equals the source guest's: this avoids a guest/host mismatch
3527 * in the MSR configuration and the unexpected misbehavior that
3528 * would otherwise follow.
3529 */
3530 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3531
3532 if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
3533 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
3534 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
3535
3536 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3537 if (!env->lbr_records[i].from) {
3538 continue;
3539 }
3540 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
3541 env->lbr_records[i].from);
3542 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
3543 env->lbr_records[i].to);
3544 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
3545 env->lbr_records[i].info);
3546 }
3547 }
3548 }
3549
3550 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
3551 * kvm_put_msr_feature_control. */
3552 }
3553
3554 if (env->mcg_cap) {
3555 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
3556 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
3557 if (has_msr_mcg_ext_ctl) {
3558 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
3559 }
3560 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3561 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
3562 }
3563 }
3564
3565 return kvm_buf_set_msrs(cpu);
3566 }
3567
3568
3569 static int kvm_get_xsave(X86CPU *cpu)
3570 {
3571 CPUX86State *env = &cpu->env;
3572 void *xsave = env->xsave_buf;
3573 int type, ret;
3574
3575 type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
3576 ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
3577 if (ret < 0) {
3578 return ret;
3579 }
3580 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
3581
3582 return 0;
3583 }
3584
3585 static int kvm_get_xcrs(X86CPU *cpu)
3586 {
3587 CPUX86State *env = &cpu->env;
3588 int i, ret;
3589 struct kvm_xcrs xcrs;
3590
3591 if (!has_xcrs) {
3592 return 0;
3593 }
3594
3595 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3596 if (ret < 0) {
3597 return ret;
3598 }
3599
3600 for (i = 0; i < xcrs.nr_xcrs; i++) {
3601 /* Only support xcr0 now */
3602 if (xcrs.xcrs[i].xcr == 0) {
3603 env->xcr0 = xcrs.xcrs[i].value;
3604 break;
3605 }
3606 }
3607 return 0;
3608 }
3609
3610 static int kvm_get_sregs(X86CPU *cpu)
3611 {
3612 CPUX86State *env = &cpu->env;
3613 struct kvm_sregs sregs;
3614 int ret;
3615
3616 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3617 if (ret < 0) {
3618 return ret;
3619 }
3620
3621 /*
3622 * The interrupt_bitmap is ignored because KVM_GET_SREGS is
3623 * always preceded by KVM_GET_VCPU_EVENTS.
3624 */
3625
3626 get_seg(&env->segs[R_CS], &sregs.cs);
3627 get_seg(&env->segs[R_DS], &sregs.ds);
3628 get_seg(&env->segs[R_ES], &sregs.es);
3629 get_seg(&env->segs[R_FS], &sregs.fs);
3630 get_seg(&env->segs[R_GS], &sregs.gs);
3631 get_seg(&env->segs[R_SS], &sregs.ss);
3632
3633 get_seg(&env->tr, &sregs.tr);
3634 get_seg(&env->ldt, &sregs.ldt);
3635
3636 env->idt.limit = sregs.idt.limit;
3637 env->idt.base = sregs.idt.base;
3638 env->gdt.limit = sregs.gdt.limit;
3639 env->gdt.base = sregs.gdt.base;
3640
3641 env->cr[0] = sregs.cr0;
3642 env->cr[2] = sregs.cr2;
3643 env->cr[3] = sregs.cr3;
3644 env->cr[4] = sregs.cr4;
3645
3646 env->efer = sregs.efer;
3647 if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
3648 env->cr[0] & CR0_PG_MASK) {
3649 env->efer |= MSR_EFER_LMA;
3650 }
3651
3652 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3653 x86_update_hflags(env);
3654
3655 return 0;
3656 }
3657
3658 static int kvm_get_sregs2(X86CPU *cpu)
3659 {
3660 CPUX86State *env = &cpu->env;
3661 struct kvm_sregs2 sregs;
3662 int i, ret;
3663
3664 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
3665 if (ret < 0) {
3666 return ret;
3667 }
3668
3669 get_seg(&env->segs[R_CS], &sregs.cs);
3670 get_seg(&env->segs[R_DS], &sregs.ds);
3671 get_seg(&env->segs[R_ES], &sregs.es);
3672 get_seg(&env->segs[R_FS], &sregs.fs);
3673 get_seg(&env->segs[R_GS], &sregs.gs);
3674 get_seg(&env->segs[R_SS], &sregs.ss);
3675
3676 get_seg(&env->tr, &sregs.tr);
3677 get_seg(&env->ldt, &sregs.ldt);
3678
3679 env->idt.limit = sregs.idt.limit;
3680 env->idt.base = sregs.idt.base;
3681 env->gdt.limit = sregs.gdt.limit;
3682 env->gdt.base = sregs.gdt.base;
3683
3684 env->cr[0] = sregs.cr0;
3685 env->cr[2] = sregs.cr2;
3686 env->cr[3] = sregs.cr3;
3687 env->cr[4] = sregs.cr4;
3688
3689 env->efer = sregs.efer;
3690 if (sev_es_enabled() && env->efer & MSR_EFER_LME &&
3691 env->cr[0] & CR0_PG_MASK) {
3692 env->efer |= MSR_EFER_LMA;
3693 }
3694
3695 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
3696
3697 if (env->pdptrs_valid) {
3698 for (i = 0; i < 4; i++) {
3699 env->pdptrs[i] = sregs.pdptrs[i];
3700 }
3701 }
3702
3703 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3704 x86_update_hflags(env);
3705
3706 return 0;
3707 }
3708
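/*
 * Reading MSRs back mirrors the batching in kvm_put_msrs(): build
 * the list of MSR indices this vCPU is known to have (with zero
 * placeholder values), fetch them all with one KVM_GET_MSRS ioctl,
 * then scatter the returned values into CPUX86State in the big
 * switch below.
 */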
3709 static int kvm_get_msrs(X86CPU *cpu)
3710 {
3711 CPUX86State *env = &cpu->env;
3712 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3713 int ret, i;
3714 uint64_t mtrr_top_bits;
3715
3716 kvm_msr_buf_reset(cpu);
3717
3718 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3719 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3720 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3721 kvm_msr_entry_add(cpu, MSR_PAT, 0);
3722 if (has_msr_star) {
3723 kvm_msr_entry_add(cpu, MSR_STAR, 0);
3724 }
3725 if (has_msr_hsave_pa) {
3726 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3727 }
3728 if (has_msr_tsc_aux) {
3729 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3730 }
3731 if (has_msr_tsc_adjust) {
3732 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3733 }
3734 if (has_msr_tsc_deadline) {
3735 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3736 }
3737 if (has_msr_misc_enable) {
3738 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3739 }
3740 if (has_msr_smbase) {
3741 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3742 }
3743 if (has_msr_smi_count) {
3744 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3745 }
3746 if (has_msr_feature_control) {
3747 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3748 }
3749 if (has_msr_pkrs) {
3750 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
3751 }
3752 if (has_msr_bndcfgs) {
3753 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3754 }
3755 if (has_msr_xss) {
3756 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3757 }
3758 if (has_msr_umwait) {
3759 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3760 }
3761 if (has_msr_spec_ctrl) {
3762 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3763 }
3764 if (has_tsc_scale_msr) {
3765 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
3766 }
3767
3768 if (has_msr_tsx_ctrl) {
3769 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3770 }
3771 if (has_msr_virt_ssbd) {
3772 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3773 }
3774 if (!env->tsc_valid) {
3775 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3776 env->tsc_valid = !runstate_is_running();
3777 }
3778
3779 #ifdef TARGET_X86_64
3780 if (lm_capable_kernel) {
3781 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3782 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3783 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3784 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3785 }
3786 #endif
3787 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3788 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3789 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3790 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
3791 }
3792 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3793 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3794 }
3795 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3796 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3797 }
3798 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3799 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3800 }
3801 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3802 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3803 }
3804 if (has_architectural_pmu_version > 0) {
3805 if (has_architectural_pmu_version > 1) {
3806 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3807 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3808 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3809 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3810 }
3811 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3812 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3813 }
3814 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3815 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3816 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3817 }
3818 }
3819
3820 if (env->mcg_cap) {
3821 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3822 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3823 if (has_msr_mcg_ext_ctl) {
3824 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3825 }
3826 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3827 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3828 }
3829 }
3830
3831 if (has_msr_hv_hypercall) {
3832 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3833 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3834 }
3835 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3836 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3837 }
3838 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3839 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3840 }
3841 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3842 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3843 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3844 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3845 }
3846 if (has_msr_hv_syndbg_options) {
3847 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
3848 }
3849 if (has_msr_hv_crash) {
3850 int j;
3851
3852 for (j = 0; j < HV_CRASH_PARAMS; j++) {
3853 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3854 }
3855 }
3856 if (has_msr_hv_runtime) {
3857 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3858 }
3859 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3860 uint32_t msr;
3861
3862 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3863 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3864 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3865 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3866 kvm_msr_entry_add(cpu, msr, 0);
3867 }
3868 }
3869 if (has_msr_hv_stimer) {
3870 uint32_t msr;
3871
3872 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3873 msr++) {
3874 kvm_msr_entry_add(cpu, msr, 0);
3875 }
3876 }
3877 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3878 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3879 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3880 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3881 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3882 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3883 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3884 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3885 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3886 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3887 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3888 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3889 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3890 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3891 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3892 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3893 }
3894 }
3895
3896 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3897 int addr_num =
3898 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3899
3900 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3901 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3902 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3903 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3904 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3905 for (i = 0; i < addr_num; i++) {
3906 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3907 }
3908 }
3909
3910 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3911 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
3912 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
3913 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
3914 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
3915 }
3916
3917 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3918 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
3919 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
3920 }
3921
3922 if (kvm_enabled() && cpu->enable_pmu &&
3923 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3924 uint64_t depth;
3925
3926 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3927 if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
3928 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
3929 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
3930
3931 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3932 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
3933 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
3934 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
3935 }
3936 }
3937 }
3938
3939 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3940 if (ret < 0) {
3941 return ret;
3942 }
3943
3944 if (ret < cpu->kvm_msr_buf->nmsrs) {
3945 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3946 error_report("error: failed to get MSR 0x%" PRIx32,
3947 (uint32_t)e->index);
3948 }
3949
3950 assert(ret == cpu->kvm_msr_buf->nmsrs);
3951 /*
3952 * MTRR masks: Each mask consists of 5 parts
3953 * a 10..0 : must be zero
3954 * b 11 : valid bit
3955 * c n-1..12: actual mask bits
3956 * d 51..n : reserved, must be zero
3957 * e 63..52 : reserved, must be zero
3958 *
3959 * 'n' is the number of physical bits supported by the CPU and is
3960 * apparently always <= 52. We know our 'n' but don't know what
3961 * the destination's 'n' is; it might be smaller, in which case
3962 * it masks (c) on loading. It might be larger, in which case
3963 * we fill 'd' so that d..c is consistent irrespective of the 'n'
3964 * we're migrating to.
3965 */
3966
3967 if (cpu->fill_mtrr_mask) {
3968 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3969 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3970 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3971 } else {
3972 mtrr_top_bits = 0;
3973 }
3974
3975 for (i = 0; i < ret; i++) {
3976 uint32_t index = msrs[i].index;
3977 switch (index) {
3978 case MSR_IA32_SYSENTER_CS:
3979 env->sysenter_cs = msrs[i].data;
3980 break;
3981 case MSR_IA32_SYSENTER_ESP:
3982 env->sysenter_esp = msrs[i].data;
3983 break;
3984 case MSR_IA32_SYSENTER_EIP:
3985 env->sysenter_eip = msrs[i].data;
3986 break;
3987 case MSR_PAT:
3988 env->pat = msrs[i].data;
3989 break;
3990 case MSR_STAR:
3991 env->star = msrs[i].data;
3992 break;
3993 #ifdef TARGET_X86_64
3994 case MSR_CSTAR:
3995 env->cstar = msrs[i].data;
3996 break;
3997 case MSR_KERNELGSBASE:
3998 env->kernelgsbase = msrs[i].data;
3999 break;
4000 case MSR_FMASK:
4001 env->fmask = msrs[i].data;
4002 break;
4003 case MSR_LSTAR:
4004 env->lstar = msrs[i].data;
4005 break;
4006 #endif
4007 case MSR_IA32_TSC:
4008 env->tsc = msrs[i].data;
4009 break;
4010 case MSR_TSC_AUX:
4011 env->tsc_aux = msrs[i].data;
4012 break;
4013 case MSR_TSC_ADJUST:
4014 env->tsc_adjust = msrs[i].data;
4015 break;
4016 case MSR_IA32_TSCDEADLINE:
4017 env->tsc_deadline = msrs[i].data;
4018 break;
4019 case MSR_VM_HSAVE_PA:
4020 env->vm_hsave = msrs[i].data;
4021 break;
4022 case MSR_KVM_SYSTEM_TIME:
4023 env->system_time_msr = msrs[i].data;
4024 break;
4025 case MSR_KVM_WALL_CLOCK:
4026 env->wall_clock_msr = msrs[i].data;
4027 break;
4028 case MSR_MCG_STATUS:
4029 env->mcg_status = msrs[i].data;
4030 break;
4031 case MSR_MCG_CTL:
4032 env->mcg_ctl = msrs[i].data;
4033 break;
4034 case MSR_MCG_EXT_CTL:
4035 env->mcg_ext_ctl = msrs[i].data;
4036 break;
4037 case MSR_IA32_MISC_ENABLE:
4038 env->msr_ia32_misc_enable = msrs[i].data;
4039 break;
4040 case MSR_IA32_SMBASE:
4041 env->smbase = msrs[i].data;
4042 break;
4043 case MSR_SMI_COUNT:
4044 env->msr_smi_count = msrs[i].data;
4045 break;
4046 case MSR_IA32_FEATURE_CONTROL:
4047 env->msr_ia32_feature_control = msrs[i].data;
4048 break;
4049 case MSR_IA32_BNDCFGS:
4050 env->msr_bndcfgs = msrs[i].data;
4051 break;
4052 case MSR_IA32_XSS:
4053 env->xss = msrs[i].data;
4054 break;
4055 case MSR_IA32_UMWAIT_CONTROL:
4056 env->umwait = msrs[i].data;
4057 break;
4058 case MSR_IA32_PKRS:
4059 env->pkrs = msrs[i].data;
4060 break;
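/* The 'default:' label sits mid-switch deliberately: the MCE bank
 * MSRs form a range whose size depends on the runtime mcg_cap
 * value, so it cannot be written as compile-time case labels. */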
4061 default:
4062 if (msrs[i].index >= MSR_MC0_CTL &&
4063 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
4064 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
4065 }
4066 break;
4067 case MSR_KVM_ASYNC_PF_EN:
4068 env->async_pf_en_msr = msrs[i].data;
4069 break;
4070 case MSR_KVM_ASYNC_PF_INT:
4071 env->async_pf_int_msr = msrs[i].data;
4072 break;
4073 case MSR_KVM_PV_EOI_EN:
4074 env->pv_eoi_en_msr = msrs[i].data;
4075 break;
4076 case MSR_KVM_STEAL_TIME:
4077 env->steal_time_msr = msrs[i].data;
4078 break;
4079 case MSR_KVM_POLL_CONTROL: {
4080 env->poll_control_msr = msrs[i].data;
4081 break;
4082 }
4083 case MSR_CORE_PERF_FIXED_CTR_CTRL:
4084 env->msr_fixed_ctr_ctrl = msrs[i].data;
4085 break;
4086 case MSR_CORE_PERF_GLOBAL_CTRL:
4087 env->msr_global_ctrl = msrs[i].data;
4088 break;
4089 case MSR_CORE_PERF_GLOBAL_STATUS:
4090 env->msr_global_status = msrs[i].data;
4091 break;
4092 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
4093 env->msr_global_ovf_ctrl = msrs[i].data;
4094 break;
4095 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
4096 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
4097 break;
4098 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
4099 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
4100 break;
4101 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
4102 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
4103 break;
4104 case HV_X64_MSR_HYPERCALL:
4105 env->msr_hv_hypercall = msrs[i].data;
4106 break;
4107 case HV_X64_MSR_GUEST_OS_ID:
4108 env->msr_hv_guest_os_id = msrs[i].data;
4109 break;
4110 case HV_X64_MSR_APIC_ASSIST_PAGE:
4111 env->msr_hv_vapic = msrs[i].data;
4112 break;
4113 case HV_X64_MSR_REFERENCE_TSC:
4114 env->msr_hv_tsc = msrs[i].data;
4115 break;
4116 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4117 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
4118 break;
4119 case HV_X64_MSR_VP_RUNTIME:
4120 env->msr_hv_runtime = msrs[i].data;
4121 break;
4122 case HV_X64_MSR_SCONTROL:
4123 env->msr_hv_synic_control = msrs[i].data;
4124 break;
4125 case HV_X64_MSR_SIEFP:
4126 env->msr_hv_synic_evt_page = msrs[i].data;
4127 break;
4128 case HV_X64_MSR_SIMP:
4129 env->msr_hv_synic_msg_page = msrs[i].data;
4130 break;
4131 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
4132 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
4133 break;
4134 case HV_X64_MSR_STIMER0_CONFIG:
4135 case HV_X64_MSR_STIMER1_CONFIG:
4136 case HV_X64_MSR_STIMER2_CONFIG:
4137 case HV_X64_MSR_STIMER3_CONFIG:
4138 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
4139 msrs[i].data;
4140 break;
4141 case HV_X64_MSR_STIMER0_COUNT:
4142 case HV_X64_MSR_STIMER1_COUNT:
4143 case HV_X64_MSR_STIMER2_COUNT:
4144 case HV_X64_MSR_STIMER3_COUNT:
4145 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
4146 msrs[i].data;
4147 break;
4148 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4149 env->msr_hv_reenlightenment_control = msrs[i].data;
4150 break;
4151 case HV_X64_MSR_TSC_EMULATION_CONTROL:
4152 env->msr_hv_tsc_emulation_control = msrs[i].data;
4153 break;
4154 case HV_X64_MSR_TSC_EMULATION_STATUS:
4155 env->msr_hv_tsc_emulation_status = msrs[i].data;
4156 break;
4157 case HV_X64_MSR_SYNDBG_OPTIONS:
4158 env->msr_hv_syndbg_options = msrs[i].data;
4159 break;
4160 case MSR_MTRRdefType:
4161 env->mtrr_deftype = msrs[i].data;
4162 break;
4163 case MSR_MTRRfix64K_00000:
4164 env->mtrr_fixed[0] = msrs[i].data;
4165 break;
4166 case MSR_MTRRfix16K_80000:
4167 env->mtrr_fixed[1] = msrs[i].data;
4168 break;
4169 case MSR_MTRRfix16K_A0000:
4170 env->mtrr_fixed[2] = msrs[i].data;
4171 break;
4172 case MSR_MTRRfix4K_C0000:
4173 env->mtrr_fixed[3] = msrs[i].data;
4174 break;
4175 case MSR_MTRRfix4K_C8000:
4176 env->mtrr_fixed[4] = msrs[i].data;
4177 break;
4178 case MSR_MTRRfix4K_D0000:
4179 env->mtrr_fixed[5] = msrs[i].data;
4180 break;
4181 case MSR_MTRRfix4K_D8000:
4182 env->mtrr_fixed[6] = msrs[i].data;
4183 break;
4184 case MSR_MTRRfix4K_E0000:
4185 env->mtrr_fixed[7] = msrs[i].data;
4186 break;
4187 case MSR_MTRRfix4K_E8000:
4188 env->mtrr_fixed[8] = msrs[i].data;
4189 break;
4190 case MSR_MTRRfix4K_F0000:
4191 env->mtrr_fixed[9] = msrs[i].data;
4192 break;
4193 case MSR_MTRRfix4K_F8000:
4194 env->mtrr_fixed[10] = msrs[i].data;
4195 break;
4196 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
4197 if (index & 1) {
4198 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
4199 mtrr_top_bits;
4200 } else {
4201 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
4202 }
4203 break;
4204 case MSR_IA32_SPEC_CTRL:
4205 env->spec_ctrl = msrs[i].data;
4206 break;
4207 case MSR_AMD64_TSC_RATIO:
4208 env->amd_tsc_scale_msr = msrs[i].data;
4209 break;
4210 case MSR_IA32_TSX_CTRL:
4211 env->tsx_ctrl = msrs[i].data;
4212 break;
4213 case MSR_VIRT_SSBD:
4214 env->virt_ssbd = msrs[i].data;
4215 break;
4216 case MSR_IA32_RTIT_CTL:
4217 env->msr_rtit_ctrl = msrs[i].data;
4218 break;
4219 case MSR_IA32_RTIT_STATUS:
4220 env->msr_rtit_status = msrs[i].data;
4221 break;
4222 case MSR_IA32_RTIT_OUTPUT_BASE:
4223 env->msr_rtit_output_base = msrs[i].data;
4224 break;
4225 case MSR_IA32_RTIT_OUTPUT_MASK:
4226 env->msr_rtit_output_mask = msrs[i].data;
4227 break;
4228 case MSR_IA32_RTIT_CR3_MATCH:
4229 env->msr_rtit_cr3_match = msrs[i].data;
4230 break;
4231 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
4232 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
4233 break;
4234 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
4235 env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
4236 msrs[i].data;
4237 break;
4238 case MSR_IA32_XFD:
4239 env->msr_xfd = msrs[i].data;
4240 break;
4241 case MSR_IA32_XFD_ERR:
4242 env->msr_xfd_err = msrs[i].data;
4243 break;
4244 case MSR_ARCH_LBR_CTL:
4245 env->msr_lbr_ctl = msrs[i].data;
4246 break;
4247 case MSR_ARCH_LBR_DEPTH:
4248 env->msr_lbr_depth = msrs[i].data;
4249 break;
4250 case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
4251 env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
4252 break;
4253 case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
4254 env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
4255 break;
4256 case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
4257 env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
4258 break;
4259 }
4260 }
4261
4262 return 0;
4263 }
4264
4265 static int kvm_put_mp_state(X86CPU *cpu)
4266 {
4267 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
4268
4269 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
4270 }
4271
4272 static int kvm_get_mp_state(X86CPU *cpu)
4273 {
4274 CPUState *cs = CPU(cpu);
4275 CPUX86State *env = &cpu->env;
4276 struct kvm_mp_state mp_state;
4277 int ret;
4278
4279 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
4280 if (ret < 0) {
4281 return ret;
4282 }
4283 env->mp_state = mp_state.mp_state;
4284 if (kvm_irqchip_in_kernel()) {
4285 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
4286 }
4287 return 0;
4288 }
4289
4290 static int kvm_get_apic(X86CPU *cpu)
4291 {
4292 DeviceState *apic = cpu->apic_state;
4293 struct kvm_lapic_state kapic;
4294 int ret;
4295
4296 if (apic && kvm_irqchip_in_kernel()) {
4297 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
4298 if (ret < 0) {
4299 return ret;
4300 }
4301
4302 kvm_get_apic_state(apic, &kapic);
4303 }
4304 return 0;
4305 }
4306
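/*
 * Transfer pending/injected exception, interrupt, NMI and SMI state
 * to KVM. With KVM_CAP_EXCEPTION_PAYLOAD the kernel distinguishes
 * "pending" from "injected" exceptions and carries the payload
 * (e.g. the CR2 value of a pending #PF or the DR6 bits of a #DB)
 * along with the event.
 */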
4307 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
4308 {
4309 CPUState *cs = CPU(cpu);
4310 CPUX86State *env = &cpu->env;
4311 struct kvm_vcpu_events events = {};
4312
4313 events.flags = 0;
4314
4315 if (has_exception_payload) {
4316 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
4317 events.exception.pending = env->exception_pending;
4318 events.exception_has_payload = env->exception_has_payload;
4319 events.exception_payload = env->exception_payload;
4320 }
4321 events.exception.nr = env->exception_nr;
4322 events.exception.injected = env->exception_injected;
4323 events.exception.has_error_code = env->has_error_code;
4324 events.exception.error_code = env->error_code;
4325
4326 events.interrupt.injected = (env->interrupt_injected >= 0);
4327 events.interrupt.nr = env->interrupt_injected;
4328 events.interrupt.soft = env->soft_interrupt;
4329
4330 events.nmi.injected = env->nmi_injected;
4331 events.nmi.pending = env->nmi_pending;
4332 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
4333
4334 events.sipi_vector = env->sipi_vector;
4335
4336 if (has_msr_smbase) {
4337 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
4338 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
4339 if (kvm_irqchip_in_kernel()) {
4340 /* As soon as these are moved to the kernel, remove them
4341 * from cs->interrupt_request.
4342 */
4343 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
4344 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
4345 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
4346 } else {
4347 /* Keep these in cs->interrupt_request. */
4348 events.smi.pending = 0;
4349 events.smi.latched_init = 0;
4350 }
4351 /* Stop SMI delivery on old machine types to avoid a reboot
4352 * on an incoming migration of an old VM.
4353 */
4354 if (!cpu->kvm_no_smi_migration) {
4355 events.flags |= KVM_VCPUEVENT_VALID_SMM;
4356 }
4357 }
4358
4359 if (level >= KVM_PUT_RESET_STATE) {
4360 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
4361 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
4362 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
4363 }
4364 }
4365
4366 if (has_triple_fault_event) {
4367 events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
4368 events.triple_fault.pending = env->triple_fault_pending;
4369 }
4370
4371 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
4372 }
4373
4374 static int kvm_get_vcpu_events(X86CPU *cpu)
4375 {
4376 CPUX86State *env = &cpu->env;
4377 struct kvm_vcpu_events events;
4378 int ret;
4379
4380 memset(&events, 0, sizeof(events));
4381 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
4382 if (ret < 0) {
4383 return ret;
4384 }
4385
4386 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
4387 env->exception_pending = events.exception.pending;
4388 env->exception_has_payload = events.exception_has_payload;
4389 env->exception_payload = events.exception_payload;
4390 } else {
4391 env->exception_pending = 0;
4392 env->exception_has_payload = false;
4393 }
4394 env->exception_injected = events.exception.injected;
4395 env->exception_nr =
4396 (env->exception_pending || env->exception_injected) ?
4397 events.exception.nr : -1;
4398 env->has_error_code = events.exception.has_error_code;
4399 env->error_code = events.exception.error_code;
4400
4401 env->interrupt_injected =
4402 events.interrupt.injected ? events.interrupt.nr : -1;
4403 env->soft_interrupt = events.interrupt.soft;
4404
4405 env->nmi_injected = events.nmi.injected;
4406 env->nmi_pending = events.nmi.pending;
4407 if (events.nmi.masked) {
4408 env->hflags2 |= HF2_NMI_MASK;
4409 } else {
4410 env->hflags2 &= ~HF2_NMI_MASK;
4411 }
4412
4413 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
4414 if (events.smi.smm) {
4415 env->hflags |= HF_SMM_MASK;
4416 } else {
4417 env->hflags &= ~HF_SMM_MASK;
4418 }
4419 if (events.smi.pending) {
4420 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4421 } else {
4422 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4423 }
4424 if (events.smi.smm_inside_nmi) {
4425 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
4426 } else {
4427 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
4428 }
4429 if (events.smi.latched_init) {
4430 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4431 } else {
4432 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4433 }
4434 }
4435
4436 if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
4437 env->triple_fault_pending = events.triple_fault.pending;
4438 }
4439
4440 env->sipi_vector = events.sipi_vector;
4441
4442 return 0;
4443 }
4444
4445 static int kvm_put_debugregs(X86CPU *cpu)
4446 {
4447 CPUX86State *env = &cpu->env;
4448 struct kvm_debugregs dbgregs;
4449 int i;
4450
4451 memset(&dbgregs, 0, sizeof(dbgregs));
4452 for (i = 0; i < 4; i++) {
4453 dbgregs.db[i] = env->dr[i];
4454 }
4455 dbgregs.dr6 = env->dr[6];
4456 dbgregs.dr7 = env->dr[7];
4457 dbgregs.flags = 0;
4458
4459 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
4460 }
4461
4462 static int kvm_get_debugregs(X86CPU *cpu)
4463 {
4464 CPUX86State *env = &cpu->env;
4465 struct kvm_debugregs dbgregs;
4466 int i, ret;
4467
4468 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
4469 if (ret < 0) {
4470 return ret;
4471 }
4472 for (i = 0; i < 4; i++) {
4473 env->dr[i] = dbgregs.db[i];
4474 }
4475 env->dr[4] = env->dr[6] = dbgregs.dr6;
4476 env->dr[5] = env->dr[7] = dbgregs.dr7;
4477
4478 return 0;
4479 }
4480
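/*
 * Push the opaque nested (VMX/SVM) state blob back to KVM. Only the
 * flags affected by vCPU reset are regenerated here from env->hflags
 * and env->hflags2; the rest of the blob is whatever
 * KVM_GET_NESTED_STATE or an incoming migration last gave us.
 */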
4481 static int kvm_put_nested_state(X86CPU *cpu)
4482 {
4483 CPUX86State *env = &cpu->env;
4484 int max_nested_state_len = kvm_max_nested_state_length();
4485
4486 if (!env->nested_state) {
4487 return 0;
4488 }
4489
4490 /*
4491 * Copy flags that are affected by reset from env->hflags and env->hflags2.
4492 */
4493 if (env->hflags & HF_GUEST_MASK) {
4494 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
4495 } else {
4496 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
4497 }
4498
4499 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
4500 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
4501 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
4502 } else {
4503 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
4504 }
4505
4506 assert(env->nested_state->size <= max_nested_state_len);
4507 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
4508 }
4509
4510 static int kvm_get_nested_state(X86CPU *cpu)
4511 {
4512 CPUX86State *env = &cpu->env;
4513 int max_nested_state_len = kvm_max_nested_state_length();
4514 int ret;
4515
4516 if (!env->nested_state) {
4517 return 0;
4518 }
4519
4520 /*
4521 * It is possible that migration restored a smaller size into
4522 * nested_state->hdr.size than what our kernel supports.
4523 * We preserve the migration origin's nested_state->hdr.size for
4524 * the call to KVM_SET_NESTED_STATE, but want our next call to
4525 * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
4526 */
4527 env->nested_state->size = max_nested_state_len;
4528
4529 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
4530 if (ret < 0) {
4531 return ret;
4532 }
4533
4534 /*
4535 * Copy flags that are affected by reset to env->hflags and env->hflags2.
4536 */
4537 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
4538 env->hflags |= HF_GUEST_MASK;
4539 } else {
4540 env->hflags &= ~HF_GUEST_MASK;
4541 }
4542
4543 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
4544 if (cpu_has_svm(env)) {
4545 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
4546 env->hflags2 |= HF2_GIF_MASK;
4547 } else {
4548 env->hflags2 &= ~HF2_GIF_MASK;
4549 }
4550 }
4551
4552 return ret;
4553 }
4554
4555 int kvm_arch_put_registers(CPUState *cpu, int level)
4556 {
4557 X86CPU *x86_cpu = X86_CPU(cpu);
4558 int ret;
4559
4560 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
4561
4562 /*
4563 * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
4564 * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
4565 * precede kvm_put_nested_state() when 'real' nested state is set.
4566 */
4567 if (level >= KVM_PUT_RESET_STATE) {
4568 ret = kvm_put_msr_feature_control(x86_cpu);
4569 if (ret < 0) {
4570 return ret;
4571 }
4572 }
4573
4574 /* must be before kvm_put_nested_state so that EFER.SVME is set */
4575 ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
4576 if (ret < 0) {
4577 return ret;
4578 }
4579
4580 if (level >= KVM_PUT_RESET_STATE) {
4581 ret = kvm_put_nested_state(x86_cpu);
4582 if (ret < 0) {
4583 return ret;
4584 }
4585 }
4586
4587 if (level == KVM_PUT_FULL_STATE) {
4588 /* We don't check for kvm_arch_set_tsc_khz() errors here,
4589 * because TSC frequency mismatch shouldn't abort migration,
4590 * unless the user explicitly asked for a more strict TSC
4591 * setting (e.g. using an explicit "tsc-freq" option).
4592 */
4593 kvm_arch_set_tsc_khz(cpu);
4594 }
4595
4596 #ifdef CONFIG_XEN_EMU
4597 if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
4598 ret = kvm_put_xen_state(cpu);
4599 if (ret < 0) {
4600 return ret;
4601 }
4602 }
4603 #endif
4604
4605 ret = kvm_getput_regs(x86_cpu, 1);
4606 if (ret < 0) {
4607 return ret;
4608 }
4609 ret = kvm_put_xsave(x86_cpu);
4610 if (ret < 0) {
4611 return ret;
4612 }
4613 ret = kvm_put_xcrs(x86_cpu);
4614 if (ret < 0) {
4615 return ret;
4616 }
4617 ret = kvm_put_msrs(x86_cpu, level);
4618 if (ret < 0) {
4619 return ret;
4620 }
4621 ret = kvm_put_vcpu_events(x86_cpu, level);
4622 if (ret < 0) {
4623 return ret;
4624 }
4625 if (level >= KVM_PUT_RESET_STATE) {
4626 ret = kvm_put_mp_state(x86_cpu);
4627 if (ret < 0) {
4628 return ret;
4629 }
4630 }
4631
4632 ret = kvm_put_tscdeadline_msr(x86_cpu);
4633 if (ret < 0) {
4634 return ret;
4635 }
4636 ret = kvm_put_debugregs(x86_cpu);
4637 if (ret < 0) {
4638 return ret;
4639 }
4640 return 0;
4641 }
4642
4643 int kvm_arch_get_registers(CPUState *cs)
4644 {
4645 X86CPU *cpu = X86_CPU(cs);
4646 int ret;
4647
4648 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
4649
4650 ret = kvm_get_vcpu_events(cpu);
4651 if (ret < 0) {
4652 goto out;
4653 }
4654 /*
4655 * KVM_GET_MPSTATE can modify CS and RIP, call it before
4656 * KVM_GET_REGS and KVM_GET_SREGS.
4657 */
4658 ret = kvm_get_mp_state(cpu);
4659 if (ret < 0) {
4660 goto out;
4661 }
4662 ret = kvm_getput_regs(cpu, 0);
4663 if (ret < 0) {
4664 goto out;
4665 }
4666 ret = kvm_get_xsave(cpu);
4667 if (ret < 0) {
4668 goto out;
4669 }
4670 ret = kvm_get_xcrs(cpu);
4671 if (ret < 0) {
4672 goto out;
4673 }
4674 ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
4675 if (ret < 0) {
4676 goto out;
4677 }
4678 ret = kvm_get_msrs(cpu);
4679 if (ret < 0) {
4680 goto out;
4681 }
4682 ret = kvm_get_apic(cpu);
4683 if (ret < 0) {
4684 goto out;
4685 }
4686 ret = kvm_get_debugregs(cpu);
4687 if (ret < 0) {
4688 goto out;
4689 }
4690 ret = kvm_get_nested_state(cpu);
4691 if (ret < 0) {
4692 goto out;
4693 }
4694 #ifdef CONFIG_XEN_EMU
4695 if (xen_mode == XEN_EMULATE) {
4696 ret = kvm_get_xen_state(cs);
4697 if (ret < 0) {
4698 goto out;
4699 }
4700 }
4701 #endif
4702 ret = 0;
4703 out:
4704 cpu_sync_bndcs_hflags(&cpu->env);
4705 return ret;
4706 }
4707
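/*
 * Runs right before KVM_RUN: inject any pending NMI/SMI, and, when
 * the PIC is emulated in userspace, feed a pending external
 * interrupt to the vCPU if it can take one now, or else request an
 * interrupt-window exit so we get control back as soon as it can.
 */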
4708 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4709 {
4710 X86CPU *x86_cpu = X86_CPU(cpu);
4711 CPUX86State *env = &x86_cpu->env;
4712 int ret;
4713
4714 /* Inject NMI */
4715 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4716 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4717 qemu_mutex_lock_iothread();
4718 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4719 qemu_mutex_unlock_iothread();
4720 DPRINTF("injected NMI\n");
4721 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4722 if (ret < 0) {
4723 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4724 strerror(-ret));
4725 }
4726 }
4727 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4728 qemu_mutex_lock_iothread();
4729 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4730 qemu_mutex_unlock_iothread();
4731 DPRINTF("injected SMI\n");
4732 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4733 if (ret < 0) {
4734 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4735 strerror(-ret));
4736 }
4737 }
4738 }
4739
4740 if (!kvm_pic_in_kernel()) {
4741 qemu_mutex_lock_iothread();
4742 }
4743
4744 /* Force the VCPU out of its inner loop to process any INIT requests
4745 * or (for userspace APIC, but it is cheap to combine the checks here)
4746 * pending TPR access reports.
4747 */
4748 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4749 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4750 !(env->hflags & HF_SMM_MASK)) {
4751 cpu->exit_request = 1;
4752 }
4753 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4754 cpu->exit_request = 1;
4755 }
4756 }
4757
4758 if (!kvm_pic_in_kernel()) {
4759 /* Try to inject an interrupt if the guest can accept it */
4760 if (run->ready_for_interrupt_injection &&
4761 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4762 (env->eflags & IF_MASK)) {
4763 int irq;
4764
4765 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4766 irq = cpu_get_pic_interrupt(env);
4767 if (irq >= 0) {
4768 struct kvm_interrupt intr;
4769
4770 intr.irq = irq;
4771 DPRINTF("injected interrupt %d\n", irq);
4772 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4773 if (ret < 0) {
4774 fprintf(stderr,
4775 "KVM: injection failed, interrupt lost (%s)\n",
4776 strerror(-ret));
4777 }
4778 }
4779 }
4780
4781 /* If we have an interrupt but the guest is not ready to receive an
4782 * interrupt, request an interrupt window exit. This will
4783 * cause a return to userspace as soon as the guest is ready to
4784 * receive interrupts. */
4785 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4786 run->request_interrupt_window = 1;
4787 } else {
4788 run->request_interrupt_window = 0;
4789 }
4790
4791 DPRINTF("setting tpr\n");
4792 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4793
4794 qemu_mutex_unlock_iothread();
4795 }
4796 }
4797
4798 static void kvm_rate_limit_on_bus_lock(void)
4799 {
4800 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
4801
4802 if (delay_ns) {
4803 g_usleep(delay_ns / SCALE_US);
4804 }
4805 }
4806
4807 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4808 {
4809 X86CPU *x86_cpu = X86_CPU(cpu);
4810 CPUX86State *env = &x86_cpu->env;
4811
4812 if (run->flags & KVM_RUN_X86_SMM) {
4813 env->hflags |= HF_SMM_MASK;
4814 } else {
4815 env->hflags &= ~HF_SMM_MASK;
4816 }
4817 if (run->if_flag) {
4818 env->eflags |= IF_MASK;
4819 } else {
4820 env->eflags &= ~IF_MASK;
4821 }
4822 if (run->flags & KVM_RUN_X86_BUS_LOCK) {
4823 kvm_rate_limit_on_bus_lock();
4824 }
4825
4826 #ifdef CONFIG_XEN_EMU
4827 /*
4828 * If the callback is asserted as a GSI (or PCI INTx) then check if
4829 * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
4830 * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
4831 * EOI and only resample then, exactly how the VFIO eventfd pairs
4832 * are designed to work for level triggered interrupts.
4833 */
4834 if (x86_cpu->env.xen_callback_asserted) {
4835 kvm_xen_maybe_deassert_callback(cpu);
4836 }
4837 #endif
4838
4839 /* We need to protect the apic state against concurrent accesses from
4840 * different threads in case the userspace irqchip is used. */
4841 if (!kvm_irqchip_in_kernel()) {
4842 qemu_mutex_lock_iothread();
4843 }
4844 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4845 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4846 if (!kvm_irqchip_in_kernel()) {
4847 qemu_mutex_unlock_iothread();
4848 }
4849 return cpu_get_mem_attrs(env);
4850 }
4851
4852 int kvm_arch_process_async_events(CPUState *cs)
4853 {
4854 X86CPU *cpu = X86_CPU(cs);
4855 CPUX86State *env = &cpu->env;
4856
4857 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
4858 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4859 assert(env->mcg_cap);
4860
4861 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4862
4863 kvm_cpu_synchronize_state(cs);
4864
4865 if (env->exception_nr == EXCP08_DBLE) {
4866 /* this means triple fault */
4867 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4868 cs->exit_request = 1;
4869 return 0;
4870 }
4871 kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4872 env->has_error_code = 0;
4873
4874 cs->halted = 0;
4875 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4876 env->mp_state = KVM_MP_STATE_RUNNABLE;
4877 }
4878 }
4879
4880 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4881 !(env->hflags & HF_SMM_MASK)) {
4882 kvm_cpu_synchronize_state(cs);
4883 do_cpu_init(cpu);
4884 }
4885
4886 if (kvm_irqchip_in_kernel()) {
4887 return 0;
4888 }
4889
4890 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4891 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4892 apic_poll_irq(cpu->apic_state);
4893 }
4894 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4895 (env->eflags & IF_MASK)) ||
4896 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4897 cs->halted = 0;
4898 }
4899 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4900 kvm_cpu_synchronize_state(cs);
4901 do_cpu_sipi(cpu);
4902 }
4903 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4904 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4905 kvm_cpu_synchronize_state(cs);
4906 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4907 env->tpr_access_type);
4908 }
4909
4910 return cs->halted;
4911 }
4912
4913 static int kvm_handle_halt(X86CPU *cpu)
4914 {
4915 CPUState *cs = CPU(cpu);
4916 CPUX86State *env = &cpu->env;
4917
4918 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4919 (env->eflags & IF_MASK)) &&
4920 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4921 cs->halted = 1;
4922 return EXCP_HLT;
4923 }
4924
4925 return 0;
4926 }
4927
4928 static int kvm_handle_tpr_access(X86CPU *cpu)
4929 {
4930 CPUState *cs = CPU(cpu);
4931 struct kvm_run *run = cs->kvm_run;
4932
4933 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4934 run->tpr_access.is_write ? TPR_ACCESS_WRITE
4935 : TPR_ACCESS_READ);
4936 return 1;
4937 }
4938
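/*
 * Software breakpoints are implemented by patching guest memory:
 * save the original byte at bp->pc and replace it with an int3
 * opcode (0xcc). KVM reports the resulting #BP exits to us while
 * KVM_GUESTDBG_USE_SW_BP is enabled.
 */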
4939 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4940 {
4941 static const uint8_t int3 = 0xcc;
4942
4943 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4944 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4945 return -EINVAL;
4946 }
4947 return 0;
4948 }
4949
4950 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4951 {
4952 uint8_t int3;
4953
4954 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
4955 return -EINVAL;
4956 }
4957 if (int3 != 0xcc) {
4958 return 0;
4959 }
4960 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
4961 return -EINVAL;
4962 }
4963 return 0;
4964 }
4965
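/*
 * x86 provides exactly four debug address registers (DR0-DR3), so
 * at most four hardware breakpoints/watchpoints can be armed.
 */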
4966 static struct {
4967 target_ulong addr;
4968 int len;
4969 int type;
4970 } hw_breakpoint[4];
4971
4972 static int nb_hw_breakpoint;
4973
4974 static int find_hw_breakpoint(target_ulong addr, int len, int type)
4975 {
4976 int n;
4977
4978 for (n = 0; n < nb_hw_breakpoint; n++) {
4979 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
4980 (hw_breakpoint[n].len == len || len == -1)) {
4981 return n;
4982 }
4983 }
4984 return -1;
4985 }
4986
4987 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
4988 {
4989 switch (type) {
4990 case GDB_BREAKPOINT_HW:
4991 len = 1;
4992 break;
4993 case GDB_WATCHPOINT_WRITE:
4994 case GDB_WATCHPOINT_ACCESS:
4995 switch (len) {
4996 case 1:
4997 break;
4998 case 2:
4999 case 4:
5000 case 8:
5001 if (addr & (len - 1)) {
5002 return -EINVAL;
5003 }
5004 break;
5005 default:
5006 return -EINVAL;
5007 }
5008 break;
5009 default:
5010 return -ENOSYS;
5011 }
5012
5013 if (nb_hw_breakpoint == 4) {
5014 return -ENOBUFS;
5015 }
5016 if (find_hw_breakpoint(addr, len, type) >= 0) {
5017 return -EEXIST;
5018 }
5019 hw_breakpoint[nb_hw_breakpoint].addr = addr;
5020 hw_breakpoint[nb_hw_breakpoint].len = len;
5021 hw_breakpoint[nb_hw_breakpoint].type = type;
5022 nb_hw_breakpoint++;
5023
5024 return 0;
5025 }
5026
5027 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
5028 {
5029 int n;
5030
5031 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
5032 if (n < 0) {
5033 return -ENOENT;
5034 }
5035 nb_hw_breakpoint--;
5036 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
5037
5038 return 0;
5039 }
5040
5041 void kvm_arch_remove_all_hw_breakpoints(void)
5042 {
5043 nb_hw_breakpoint = 0;
5044 }
5045
5046 static CPUWatchpoint hw_watchpoint;
5047
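/*
 * Decode a #DB exit: DR6.BS signals a single-step trap, while DR6
 * bits 3..0 identify which hardware breakpoint fired. The matching
 * R/W field in DR7 (bits 16 + n*4) distinguishes an execution
 * breakpoint (0) from a write (1) or access (3) watchpoint. Debug
 * events we did not set up ourselves are re-queued for the guest.
 */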
5048 static int kvm_handle_debug(X86CPU *cpu,
5049 struct kvm_debug_exit_arch *arch_info)
5050 {
5051 CPUState *cs = CPU(cpu);
5052 CPUX86State *env = &cpu->env;
5053 int ret = 0;
5054 int n;
5055
5056 if (arch_info->exception == EXCP01_DB) {
5057 if (arch_info->dr6 & DR6_BS) {
5058 if (cs->singlestep_enabled) {
5059 ret = EXCP_DEBUG;
5060 }
5061 } else {
5062 for (n = 0; n < 4; n++) {
5063 if (arch_info->dr6 & (1 << n)) {
5064 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
5065 case 0x0:
5066 ret = EXCP_DEBUG;
5067 break;
5068 case 0x1:
5069 ret = EXCP_DEBUG;
5070 cs->watchpoint_hit = &hw_watchpoint;
5071 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5072 hw_watchpoint.flags = BP_MEM_WRITE;
5073 break;
5074 case 0x3:
5075 ret = EXCP_DEBUG;
5076 cs->watchpoint_hit = &hw_watchpoint;
5077 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5078 hw_watchpoint.flags = BP_MEM_ACCESS;
5079 break;
5080 }
5081 }
5082 }
5083 }
5084 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
5085 ret = EXCP_DEBUG;
5086 }
5087 if (ret == 0) {
5088 cpu_synchronize_state(cs);
5089 assert(env->exception_nr == -1);
5090
5091 /* pass to guest */
5092 kvm_queue_exception(env, arch_info->exception,
5093 arch_info->exception == EXCP01_DB,
5094 arch_info->dr6);
5095 env->has_error_code = 0;
5096 }
5097
5098 return ret;
5099 }
5100
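/*
 * Build the KVM_SET_GUEST_DEBUG request. debugreg[7] mirrors the
 * hardware DR7 layout: 0x0600 keeps the GE bit (and the always-one
 * bit 10) set, (2 << (n * 2)) is the global-enable bit for
 * breakpoint n, and the type and length fields for breakpoint n
 * live at bits 16 + n*4 and 18 + n*4 respectively (length code
 * 0x2 denoting an 8-byte watchpoint).
 */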
5101 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
5102 {
5103 const uint8_t type_code[] = {
5104 [GDB_BREAKPOINT_HW] = 0x0,
5105 [GDB_WATCHPOINT_WRITE] = 0x1,
5106 [GDB_WATCHPOINT_ACCESS] = 0x3
5107 };
5108 const uint8_t len_code[] = {
5109 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
5110 };
5111 int n;
5112
5113 if (kvm_sw_breakpoints_active(cpu)) {
5114 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
5115 }
5116 if (nb_hw_breakpoint > 0) {
5117 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
5118 dbg->arch.debugreg[7] = 0x0600;
5119 for (n = 0; n < nb_hw_breakpoint; n++) {
5120 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
5121 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
5122 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
5123 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
5124 }
5125 }
5126 }
5127
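/*
 * Translate the registered msr_handlers[] into a
 * KVM_X86_SET_MSR_FILTER request: each handled MSR becomes a
 * one-MSR range whose all-zero permission bitmap, together with
 * KVM_MSR_FILTER_DEFAULT_ALLOW, makes KVM intercept exactly that
 * MSR and bounce accesses to userspace as KVM_EXIT_X86_RDMSR /
 * KVM_EXIT_X86_WRMSR.
 */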
5128 static bool kvm_install_msr_filters(KVMState *s)
5129 {
5130 uint64_t zero = 0;
5131 struct kvm_msr_filter filter = {
5132 .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
5133 };
5134 int r, i, j = 0;
5135
5136 for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
5137 KVMMSRHandlers *handler = &msr_handlers[i];
5138 if (handler->msr) {
5139 struct kvm_msr_filter_range *range = &filter.ranges[j++];
5140
5141 *range = (struct kvm_msr_filter_range) {
5142 .flags = 0,
5143 .nmsrs = 1,
5144 .base = handler->msr,
5145 .bitmap = (__u8 *)&zero,
5146 };
5147
5148 if (handler->rdmsr) {
5149 range->flags |= KVM_MSR_FILTER_READ;
5150 }
5151
5152 if (handler->wrmsr) {
5153 range->flags |= KVM_MSR_FILTER_WRITE;
5154 }
5155 }
5156 }
5157
5158 r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
5159 if (r) {
5160 return false;
5161 }
5162
5163 return true;
5164 }
5165
5166 bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
5167 QEMUWRMSRHandler *wrmsr)
5168 {
5169 int i;
5170
5171 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5172 if (!msr_handlers[i].msr) {
5173 msr_handlers[i] = (KVMMSRHandlers) {
5174 .msr = msr,
5175 .rdmsr = rdmsr,
5176 .wrmsr = wrmsr,
5177 };
5178
5179 if (!kvm_install_msr_filters(s)) {
5180 msr_handlers[i] = (KVMMSRHandlers) { };
5181 return false;
5182 }
5183
5184 return true;
5185 }
5186 }
5187
5188 return false;
5189 }
5190
5191 static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
5192 {
5193 int i;
5194 bool r;
5195
5196 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5197 KVMMSRHandlers *handler = &msr_handlers[i];
5198 if (run->msr.index == handler->msr) {
5199 if (handler->rdmsr) {
5200 r = handler->rdmsr(cpu, handler->msr,
5201 (uint64_t *)&run->msr.data);
5202 run->msr.error = r ? 0 : 1;
5203 return 0;
5204 }
5205 }
5206 }
5207
5208 assert(false);
5209 }
5210
5211 static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
5212 {
5213 int i;
5214 bool r;
5215
5216 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5217 KVMMSRHandlers *handler = &msr_handlers[i];
5218 if (run->msr.index == handler->msr) {
5219 if (handler->wrmsr) {
5220 r = handler->wrmsr(cpu, handler->msr, run->msr.data);
5221 run->msr.error = r ? 0 : 1;
5222 return 0;
5223 }
5224 }
5225 }
5226
5227 assert(false);
5228 }
5229
5230 static bool has_sgx_provisioning;
5231
5232 static bool __kvm_enable_sgx_provisioning(KVMState *s)
5233 {
5234 int fd, ret;
5235
5236 if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
5237 return false;
5238 }
5239
5240 fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
5241 if (fd < 0) {
5242 return false;
5243 }
5244
5245 ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
5246 if (ret) {
5247 error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
5248 exit(1);
5249 }
5250 close(fd);
5251 return true;
5252 }
5253
5254 bool kvm_enable_sgx_provisioning(KVMState *s)
5255 {
5256 return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
5257 }
5258
5259 static bool host_supports_vmx(void)
5260 {
5261 uint32_t ecx, unused;
5262
5263 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
5264 return ecx & CPUID_EXT_VMX;
5265 }
5266
5267 #define VMX_INVALID_GUEST_STATE 0x80000021
5268
5269 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
5270 {
5271 X86CPU *cpu = X86_CPU(cs);
5272 uint64_t code;
5273 int ret;
5274 bool ctx_invalid;
5275 char str[256];
5276 KVMState *state;
5277
5278 switch (run->exit_reason) {
5279 case KVM_EXIT_HLT:
5280 DPRINTF("handle_hlt\n");
5281 qemu_mutex_lock_iothread();
5282 ret = kvm_handle_halt(cpu);
5283 qemu_mutex_unlock_iothread();
5284 break;
5285 case KVM_EXIT_SET_TPR:
5286 ret = 0;
5287 break;
5288 case KVM_EXIT_TPR_ACCESS:
5289 qemu_mutex_lock_iothread();
5290 ret = kvm_handle_tpr_access(cpu);
5291 qemu_mutex_unlock_iothread();
5292 break;
5293 case KVM_EXIT_FAIL_ENTRY:
5294 code = run->fail_entry.hardware_entry_failure_reason;
5295 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
5296 code);
5297 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
5298 fprintf(stderr,
5299 "\nIf you're running a guest on an Intel machine without "
5300 "unrestricted mode\n"
5301 "support, the failure can be most likely due to the guest "
5302 "entering an invalid\n"
5303 "state for Intel VT. For example, the guest maybe running "
5304 "in big real mode\n"
5305 "which is not supported on less recent Intel processors."
5306 "\n\n");
5307 }
5308 ret = -1;
5309 break;
5310 case KVM_EXIT_EXCEPTION:
5311 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
5312 run->ex.exception, run->ex.error_code);
5313 ret = -1;
5314 break;
5315 case KVM_EXIT_DEBUG:
5316 DPRINTF("kvm_exit_debug\n");
5317 qemu_mutex_lock_iothread();
5318 ret = kvm_handle_debug(cpu, &run->debug.arch);
5319 qemu_mutex_unlock_iothread();
5320 break;
5321 case KVM_EXIT_HYPERV:
5322 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
5323 break;
5324 case KVM_EXIT_IOAPIC_EOI:
5325 ioapic_eoi_broadcast(run->eoi.vector);
5326 ret = 0;
5327 break;
5328 case KVM_EXIT_X86_BUS_LOCK:
5329 /* already handled in kvm_arch_post_run */
5330 ret = 0;
5331 break;
5332 case KVM_EXIT_NOTIFY:
5333 ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
5334 state = KVM_STATE(current_accel());
5335 sprintf(str, "Encountered a notify exit with %svalid context in"
5336 " the guest. The guest may be misbehaving."
5337 " Please have a look.", ctx_invalid ? "in" : "");
5338 if (ctx_invalid ||
5339 state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
5340 warn_report("KVM internal error: %s", str);
5341 ret = -1;
5342 } else {
5343 warn_report_once("KVM: %s", str);
5344 ret = 0;
5345 }
5346 break;
5347 case KVM_EXIT_X86_RDMSR:
5348 /* We only enable MSR filtering, any other exit is bogus */
5349 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5350 ret = kvm_handle_rdmsr(cpu, run);
5351 break;
5352 case KVM_EXIT_X86_WRMSR:
5353 /* We only enable MSR filtering, any other exit is bogus */
5354 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5355 ret = kvm_handle_wrmsr(cpu, run);
5356 break;
5357 #ifdef CONFIG_XEN_EMU
5358 case KVM_EXIT_XEN:
5359 ret = kvm_xen_handle_exit(cpu, &run->xen);
5360 break;
5361 #endif
5362 default:
5363 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
5364 ret = -1;
5365 break;
5366 }
5367
5368 return ret;
5369 }
5370
5371 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
5372 {
5373 X86CPU *cpu = X86_CPU(cs);
5374 CPUX86State *env = &cpu->env;
5375
5376 kvm_cpu_synchronize_state(cs);
5377 return !(env->cr[0] & CR0_PE_MASK) ||
5378 ((env->segs[R_CS].selector & 3) != 3);
5379 }
5380
5381 void kvm_arch_init_irq_routing(KVMState *s)
5382 {
5383 /* We know at this point that we're using the in-kernel
5384 * irqchip, so we can use irqfds, and on x86 we know
5385 * we can use msi via irqfd and GSI routing.
5386 */
5387 kvm_msi_via_irqfd_allowed = true;
5388 kvm_gsi_routing_allowed = true;
5389
5390 if (kvm_irqchip_is_split()) {
5391 KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
5392 int i;
5393
5394 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
5395 MSI routes for signaling interrupts to the local apics. */
5396 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
5397 if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
5398 error_report("Could not enable split IRQ mode.");
5399 exit(1);
5400 }
5401 }
5402 kvm_irqchip_commit_route_changes(&c);
5403 }
5404 }
5405
5406 int kvm_arch_irqchip_create(KVMState *s)
5407 {
5408 int ret;
5409 if (kvm_kernel_irqchip_split()) {
5410 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
5411 if (ret) {
5412 error_report("Could not enable split irqchip mode: %s",
5413 strerror(-ret));
5414 exit(1);
5415 } else {
5416 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
5417 kvm_split_irqchip = true;
5418 return 1;
5419 }
5420 } else {
5421 return 0;
5422 }
5423 }
5424
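/*
 * With KVM_FEATURE_MSI_EXT_DEST_ID a guest can target more than 255
 * vCPUs without interrupt remapping by placing the upper destination
 * APIC ID bits in bits 11-5 of the MSI address. Shifting that field
 * left by 35 moves bit 5 to bit 40, i.e. into the upper address
 * dword where the in-kernel APIC expects the high destination-ID
 * bits.
 */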
5425 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
5426 {
5427 CPUX86State *env;
5428 uint64_t ext_id;
5429
5430 if (!first_cpu) {
5431 return address;
5432 }
5433 env = &X86_CPU(first_cpu)->env;
5434 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
5435 return address;
5436 }
5437
5438 /*
5439 * If the remappable format bit is set, or the upper bits are
5440 * already set in address_hi, or the low extended bits aren't
5441 * there anyway, do nothing.
5442 */
5443 ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
5444 if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
5445 return address;
5446 }
5447
5448 address &= ~ext_id;
5449 address |= ext_id << 35;
5450 return address;
5451 }
5452
5453 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
5454 uint64_t address, uint32_t data, PCIDevice *dev)
5455 {
5456 X86IOMMUState *iommu = x86_iommu_get_default();
5457
5458 if (iommu) {
5459 X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
5460
5461 if (class->int_remap) {
5462 int ret;
5463 MSIMessage src, dst;
5464
5465 src.address = route->u.msi.address_hi;
5466 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
5467 src.address |= route->u.msi.address_lo;
5468 src.data = route->u.msi.data;
5469
5470 ret = class->int_remap(iommu, &src, &dst, dev ? \
5471 pci_requester_id(dev) : \
5472 X86_IOMMU_SID_INVALID);
5473 if (ret) {
5474 trace_kvm_x86_fixup_msi_error(route->gsi);
5475 return 1;
5476 }
5477
5478 /*
5479 * Handle an untranslated compatibility-format interrupt with the
5480 * extended destination ID in bits 11-5. */
5481 dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
5482
5483 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
5484 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
5485 route->u.msi.data = dst.data;
5486 return 0;
5487 }
5488 }
5489
5490 #ifdef CONFIG_XEN_EMU
5491 if (xen_mode == XEN_EMULATE) {
5492 int handled = xen_evtchn_translate_pirq_msi(route, address, data);
5493
5494 /*
5495 * If it was a PIRQ and successfully routed (handled == 0) or it was
5496 * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
5497 */
5498 if (handled <= 0) {
5499 return handled;
5500 }
5501 }
5502 #endif
5503
5504 address = kvm_swizzle_msi_ext_dest_id(address);
5505 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
5506 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
5507 return 0;
5508 }
5509
5510 typedef struct MSIRouteEntry MSIRouteEntry;
5511
5512 struct MSIRouteEntry {
5513 PCIDevice *dev; /* Device pointer */
5514 int vector; /* MSI/MSIX vector index */
5515 int virq; /* Virtual IRQ index */
5516 QLIST_ENTRY(MSIRouteEntry) list;
5517 };
5518
5519 /* List of used GSI routes */
5520 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
5521 QLIST_HEAD_INITIALIZER(msi_route_list);
5522
5523 void kvm_update_msi_routes_all(void *private, bool global,
5524 uint32_t index, uint32_t mask)
5525 {
5526 int cnt = 0, vector;
5527 MSIRouteEntry *entry;
5528 MSIMessage msg;
5529 PCIDevice *dev;
5530
5531 /* TODO: explicit route update */
5532 QLIST_FOREACH(entry, &msi_route_list, list) {
5533 cnt++;
5534 vector = entry->vector;
5535 dev = entry->dev;
5536 if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
5537 msg = msix_get_message(dev, vector);
5538 } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
5539 msg = msi_get_message(dev, vector);
5540 } else {
5541 /*
5542 * Either MSI/MSIX is disabled for the device, or the
5543 * specific message was masked out. Skip this one.
5544 */
5545 continue;
5546 }
5547 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
5548 }
5549 kvm_irqchip_commit_routes(kvm_state);
5550 trace_kvm_x86_update_msi_routes(cnt);
5551 }
5552
5553 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
5554 int vector, PCIDevice *dev)
5555 {
5556 static bool notify_list_inited = false;
5557 MSIRouteEntry *entry;
5558
5559 if (!dev) {
5560 /* These are (possibly) IOAPIC routes that are only used in
5561 * split kernel irqchip mode, while we only track routes for
5562 * PCI devices here. */
5563 return 0;
5564 }
5565
5566 entry = g_new0(MSIRouteEntry, 1);
5567 entry->dev = dev;
5568 entry->vector = vector;
5569 entry->virq = route->gsi;
5570 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
5571
5572 trace_kvm_x86_add_msi_route(route->gsi);
5573
5574 if (!notify_list_inited) {
5575 /* For the first time we do add route, add ourselves into
5576 * IOMMU's IEC notify list if needed. */
5577 X86IOMMUState *iommu = x86_iommu_get_default();
5578 if (iommu) {
5579 x86_iommu_iec_register_notifier(iommu,
5580 kvm_update_msi_routes_all,
5581 NULL);
5582 }
5583 notify_list_inited = true;
5584 }
5585 return 0;
5586 }
5587
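/* Drop the housekeeping entry for a route once its virq is released. */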
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

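/*
 * On x86 an MSI data payload is never mapped directly to a GSI (routes
 * always go through the full routing table above), so this callback
 * should be unreachable; abort() catches any misuse.
 */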
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

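/*
 * Whether the WAITPKG feature set (UMONITOR/UMWAIT/TPAUSE) can be
 * exposed, tracked via availability of the UMWAIT control MSR.
 */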
bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}

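/*
 * SEV-ES guests have encrypted vCPU register state that QEMU cannot
 * rewrite, so their CPUs cannot be reset.
 */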
bool kvm_arch_cpu_check_are_resettable(void)
{
    return !sev_es_enabled();
}

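/*
 * arch_prctl() request asking the kernel for permission to enable
 * dynamically-sized XSAVE components (e.g. AMX tile state) for a
 * guest; defined locally since host headers may predate it.
 */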
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025

void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
{
    KVMState *s = kvm_state;
    uint64_t supported;

    mask &= XSTATE_DYNAMIC_MASK;
    if (!mask) {
        return;
    }
    /*
     * Just ignore bits that are not set in CPUID[EAX=0xD,ECX=0].
     * ARCH_REQ_XCOMP_GUEST_PERM would fail for them, and QEMU has
     * already warned about them because they are not supported features.
     */
    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
    mask &= supported;

    while (mask) {
        int bit = ctz64(mask);
        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
        if (rc) {
            /*
             * Older kernels (< 5.17) do not support
             * ARCH_REQ_XCOMP_GUEST_PERM, but they also do not report
             * any dynamic feature from kvm_arch_get_supported_cpuid.
             */
            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
                        "for feature bit %d", bit);
        }
        mask &= ~BIT_ULL(bit);
    }
}

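/*
 * QOM accessors for the x86 KVM accelerator properties registered in
 * kvm_arch_accel_class_init() below.
 */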
static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    return s->notify_vmexit;
}

static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
{
    KVMState *s = KVM_STATE(obj);

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    s->notify_vmexit = value;
}

static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->notify_window;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->notify_window = value;
}

static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->xen_version;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint32_t value;

    visit_type_uint32(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_version = value;
    if (value && xen_mode == XEN_DISABLED) {
        xen_mode = XEN_EMULATE;
    }
}

static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_gnttab_max_frames;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_gnttab_max_frames = value;
}

static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_evtchn_max_pirq;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_evtchn_max_pirq = value;
}

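/*
 * Register the x86-specific KVM accelerator properties on the
 * accelerator class: notify-vmexit, notify-window, and the Xen
 * emulation tunables.
 */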
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
                                   &NotifyVmexitOption_lookup,
                                   kvm_arch_get_notify_vmexit,
                                   kvm_arch_set_notify_vmexit);
    object_class_property_set_description(oc, "notify-vmexit",
                                          "Enable notify VM exit");

    object_class_property_add(oc, "notify-window", "uint32",
                              kvm_arch_get_notify_window,
                              kvm_arch_set_notify_window,
                              NULL, NULL);
    object_class_property_set_description(oc, "notify-window",
                                          "Clock cycles without an event window "
                                          "after which a notification VM exit occurs");

    object_class_property_add(oc, "xen-version", "uint32",
                              kvm_arch_get_xen_version,
                              kvm_arch_set_xen_version,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-version",
                                          "Xen version to be emulated "
                                          "(in XENVER_version form "
                                          "e.g. 0x4000a for 4.10)");

    object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
                              kvm_arch_get_xen_gnttab_max_frames,
                              kvm_arch_set_xen_gnttab_max_frames,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-gnttab-max-frames",
                                          "Maximum number of grant table frames");

    object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
                              kvm_arch_get_xen_evtchn_max_pirq,
                              kvm_arch_set_xen_evtchn_max_pirq,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-evtchn-max-pirq",
                                          "Maximum number of Xen PIRQs");
}
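
/*
 * These properties are configured on the -accel command line, e.g.
 * (illustrative values):
 *
 *   -accel kvm,notify-vmexit=run,notify-window=0
 *   -accel kvm,xen-version=0x4000a,xen-gnttab-max-frames=64
 */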

/*
 * Tell KVM the highest APIC ID in use so the kernel can size its
 * per-VM data structures accordingly.
 */
void kvm_set_max_apic_id(uint32_t max_apic_id)
{
    kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
}